diff --git a/app.py b/app.py
index 6ba2143..0101459 100644
--- a/app.py
+++ b/app.py
@@ -60,7 +60,10 @@ if pdf_name:
     )
 
     # Load the LLM model through LangChain
-    llm = langChainTools.load_llm_open_source()
+    # llm = langChainTools.load_llm_open_source()
+
+    # Load the LLM model from Ollama
+    llm = langChainTools.load_llm_ollama()
 
     # Build the chain that integrates the vectorstore and the LLM to run queries.
     # In this case the chain includes the memory parameter.
diff --git a/chats/__pycache__/streamlit_tools.cpython-311.pyc b/chats/__pycache__/streamlit_tools.cpython-311.pyc
index 3930f35..b2463fd 100644
Binary files a/chats/__pycache__/streamlit_tools.cpython-311.pyc and b/chats/__pycache__/streamlit_tools.cpython-311.pyc differ
diff --git a/embeddings/socialbigdatasociologiaycienciassocialescomputacio/eaf7d8a5-f2d4-4b12-a6d3-94efe772d2fd/data_level0.bin b/embeddings/socialbigdatasociologiaycienciassocialescomputacio/eaf7d8a5-f2d4-4b12-a6d3-94efe772d2fd/data_level0.bin
index 7775772..ea3192e 100644
Binary files a/embeddings/socialbigdatasociologiaycienciassocialescomputacio/eaf7d8a5-f2d4-4b12-a6d3-94efe772d2fd/data_level0.bin and b/embeddings/socialbigdatasociologiaycienciassocialescomputacio/eaf7d8a5-f2d4-4b12-a6d3-94efe772d2fd/data_level0.bin differ
diff --git a/embeddings/socialbigdatasociologiaycienciassocialescomputacio/eaf7d8a5-f2d4-4b12-a6d3-94efe772d2fd/length.bin b/embeddings/socialbigdatasociologiaycienciassocialescomputacio/eaf7d8a5-f2d4-4b12-a6d3-94efe772d2fd/length.bin
index a20ee2c..1dc89f8 100644
Binary files a/embeddings/socialbigdatasociologiaycienciassocialescomputacio/eaf7d8a5-f2d4-4b12-a6d3-94efe772d2fd/length.bin and b/embeddings/socialbigdatasociologiaycienciassocialescomputacio/eaf7d8a5-f2d4-4b12-a6d3-94efe772d2fd/length.bin differ
diff --git a/langchain_tools/__pycache__/lc_tools.cpython-311.pyc b/langchain_tools/__pycache__/lc_tools.cpython-311.pyc
index f2454a1..3dcd76d 100644
Binary files a/langchain_tools/__pycache__/lc_tools.cpython-311.pyc and b/langchain_tools/__pycache__/lc_tools.cpython-311.pyc differ
diff --git a/langchain_tools/lc_tools.py b/langchain_tools/lc_tools.py
index 929e737..a75ab81 100644
--- a/langchain_tools/lc_tools.py
+++ b/langchain_tools/lc_tools.py
@@ -10,6 +10,7 @@ from langchain.chains import RetrievalQAWithSourcesChain, ConversationalRetrieva
 from langchain_community.llms import HuggingFaceEndpoint
 from langchain_community.embeddings import HuggingFaceEmbeddings
 from streamlit.runtime.state import session_state
+from langchain_community.llms import Ollama
 
 
 class LangChainTools:
@@ -150,6 +151,26 @@
         return llm
 
+    @st.cache_resource
+    def load_llm_ollama(_self):
+        """Loads an open-source LLM served locally by Ollama.
+
+        Returns:
+            _type_: An Ollama LLM object.
+        """
+
+        # Choose the Ollama model to use
+        model: str = "gemma:2b"
+
+        llm = Ollama(
+            model=model,
+            temperature=0.1,
+            num_ctx=1000,
+        )
+
+        return llm
+
     def define_retrieval_qa(self, _llm, _vectordb, _file_name, _embedding_model):
         """This function wires an LLM and a vector database into a LangChain
         chain used to run queries. This chain does not integrate
         memory.
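
Note: a minimal standalone sketch of what the new load_llm_ollama() loader does,
assuming a local Ollama server is running and the gemma:2b model has already been
pulled (ollama pull gemma:2b); the parameter values mirror the diff above and the
prompt string is only illustrative.

    from langchain_community.llms import Ollama

    # Same settings as load_llm_ollama() in langchain_tools/lc_tools.py
    llm = Ollama(
        model="gemma:2b",   # small open-weights model served by Ollama
        temperature=0.1,    # low temperature for more deterministic answers
        num_ctx=1000,       # context window size forwarded to Ollama
    )

    # LangChain LLMs are Runnables, so invoke() returns the completion as a string
    print(llm.invoke("In one sentence, what does a vector store do?"))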