import base64
import logging
from typing import Optional, cast

import numpy as np
from sqlalchemy.exc import IntegrityError

from configs import dify_config
from core.entities.embedding_type import EmbeddingInputType
from core.model_manager import ModelInstance
from core.model_runtime.entities.model_entities import ModelPropertyKey
from core.model_runtime.model_providers.__base.text_embedding_model import TextEmbeddingModel
from core.rag.embedding.embedding_base import Embeddings
from extensions.ext_database import db
from extensions.ext_redis import redis_client
from libs import helper
from models.dataset import Embedding

logger = logging.getLogger(__name__)


class CacheEmbedding(Embeddings):
    def __init__(self, model_instance: ModelInstance, user: Optional[str] = None) -> None:
        self._model_instance = model_instance
        self._user = user

    def embed_documents(self, texts: list[str]) -> list[list[float]]:
        """Embed documents, reusing cached embeddings from the database and batching the rest by the model's max_chunks."""
        # use doc embedding cache or store if not exists
        text_embeddings = [None for _ in range(len(texts))]
        embedding_queue_indices = []
        for i, text in enumerate(texts):
            hash = helper.generate_text_hash(text)
            embedding = (
                db.session.query(Embedding)
                .filter_by(
                    model_name=self._model_instance.model, hash=hash, provider_name=self._model_instance.provider
                )
                .first()
            )
            if embedding:
                text_embeddings[i] = embedding.get_embedding()
            else:
                embedding_queue_indices.append(i)

        if embedding_queue_indices:
            embedding_queue_texts = [texts[i] for i in embedding_queue_indices]
            embedding_queue_embeddings = []
            try:
                model_type_instance = cast(TextEmbeddingModel, self._model_instance.model_type_instance)
                model_schema = model_type_instance.get_model_schema(
                    self._model_instance.model, self._model_instance.credentials
                )
                # batch size comes from the model schema; fall back to 1 if not declared
                max_chunks = (
                    model_schema.model_properties[ModelPropertyKey.MAX_CHUNKS]
                    if model_schema and ModelPropertyKey.MAX_CHUNKS in model_schema.model_properties
                    else 1
                )
                for i in range(0, len(embedding_queue_texts), max_chunks):
                    batch_texts = embedding_queue_texts[i : i + max_chunks]
                    embedding_result = self._model_instance.invoke_text_embedding(
                        texts=batch_texts, user=self._user, input_type=EmbeddingInputType.DOCUMENT
                    )
                    for vector in embedding_result.embeddings:
                        try:
                            # L2-normalize before caching so stored vectors are unit length
                            normalized_embedding = (vector / np.linalg.norm(vector)).tolist()
                            embedding_queue_embeddings.append(normalized_embedding)
                        except Exception:
                            logger.exception("Failed to normalize embedding")

                cache_embeddings = []
                try:
                    for i, embedding in zip(embedding_queue_indices, embedding_queue_embeddings):
                        text_embeddings[i] = embedding
                        hash = helper.generate_text_hash(texts[i])
                        if hash not in cache_embeddings:
                            embedding_cache = Embedding(
                                model_name=self._model_instance.model,
                                hash=hash,
                                provider_name=self._model_instance.provider,
                            )
                            embedding_cache.set_embedding(embedding)
                            db.session.add(embedding_cache)
                            cache_embeddings.append(hash)
                    db.session.commit()
                except IntegrityError:
                    # another worker cached the same hash first; the embedding is still returned
                    db.session.rollback()
            except Exception as ex:
                db.session.rollback()
                logger.exception("Failed to embed documents")
                raise ex

        return text_embeddings

    def embed_query(self, text: str) -> list[float]:
        """Embed query text, using a short-lived Redis cache keyed by provider, model and text hash."""
        # use query embedding cache or store if not exists
        hash = helper.generate_text_hash(text)
        embedding_cache_key = f"{self._model_instance.provider}_{self._model_instance.model}_{hash}"
        embedding = redis_client.get(embedding_cache_key)
        if embedding:
            # cache hit: refresh the TTL and decode the base64-encoded float64 vector
            redis_client.expire(embedding_cache_key, 600)
            decoded_embedding = np.frombuffer(base64.b64decode(embedding), dtype="float")
            return [float(x) for x in decoded_embedding]

        try:
            embedding_result = self._model_instance.invoke_text_embedding(
                texts=[text], user=self._user, input_type=EmbeddingInputType.QUERY
            )
            embedding_results = embedding_result.embeddings[0]
            # L2-normalize the query embedding, matching embed_documents
            embedding_results = (embedding_results / np.linalg.norm(embedding_results)).tolist()
        except Exception as ex:
            if dify_config.DEBUG:
                logger.exception(f"Failed to embed query text '{text[:10]}...({len(text)} chars)'")
            raise ex

        try:
            # encode the embedding as base64 so it can be stored as a Redis string with a 600s TTL
            embedding_vector = np.array(embedding_results)
            vector_bytes = embedding_vector.tobytes()
            encoded_vector = base64.b64encode(vector_bytes)
            encoded_str = encoded_vector.decode("utf-8")
            redis_client.setex(embedding_cache_key, 600, encoded_str)
        except Exception as ex:
            if dify_config.DEBUG:
                logger.exception(f"Failed to add embedding to redis for the text '{text[:10]}...({len(text)} chars)'")
            raise ex

        return embedding_results
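

# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original module: it demonstrates the
# serialization round-trip that embed_query() relies on for its Redis cache --
# L2-normalize the vector, store it as base64-encoded float64 bytes, then
# decode a cache hit back with np.frombuffer. It only uses numpy and base64
# from the imports above; the Redis and ModelInstance wiring is untouched,
# and the guard keeps it from running on import.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    raw = np.array([0.1, 0.2, 0.3])
    normalized = raw / np.linalg.norm(raw)

    # encode: float64 bytes -> base64 -> utf-8 string (what gets SETEX'd)
    encoded_str = base64.b64encode(normalized.tobytes()).decode("utf-8")

    # decode: base64 string -> float64 vector (what a cache hit returns)
    decoded = np.frombuffer(base64.b64decode(encoded_str), dtype="float")

    assert np.allclose(normalized, decoded)
    print("round-trip ok:", [float(x) for x in decoded])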