An open-source LLM from HuggingFace is integrated into the application
Binary file not shown.
@@ -7,12 +7,11 @@ from langchain.memory.buffer import ConversationBufferMemory
 import os
 import streamlit as st
 from dotenv import load_dotenv
-from langchain.chains import (
-    RetrievalQAWithSourcesChain,
-    ConversationalRetrievalChain)
+from langchain.chains import RetrievalQAWithSourcesChain, ConversationalRetrievalChain
+from langchain_community.llms import HuggingFaceEndpoint
 
 
-class LangChainTools():
+class LangChainTools:
     """
     Esta clase maneja algunas herramientas integraciones con las que
     cuenta LangChain.
@@ -27,7 +26,7 @@ class LangChainTools():
 
         # Cargamos la variable que contiene la api_key de OpenAI
         load_dotenv()
-        openai_api_key = os.getenv('api_key')
+        openai_api_key = os.getenv("api_key")
         # Define an OpenAI embeddings model
         self.embedding_model = OpenAIEmbeddings(openai_api_key=openai_api_key)
         # st.success('El modelo de embeddins de OpneAI se ha cargado')
@@ -35,7 +34,9 @@ class LangChainTools():
         return self.embedding_model
 
     @st.cache_resource
-    def create_vector_strore(_self, _docs_split: list, _file_name: str, _embedding_model):
+    def create_vector_strore(
+        _self, _docs_split: list, _file_name: str, _embedding_model
+    ):
         """Esta funcion construye un vector store a partir de un documento
 
         Args:
@@ -43,7 +44,7 @@ class LangChainTools():
             _file_name (str): Nombre del documento
         """
 
-        db_name = _file_name.replace('.pdf', '').replace(' ', '_').lower()
+        db_name = _file_name.replace(".pdf", "").replace(" ", "_").lower()
 
         # Cargamos el modelo de embeddings
         # _embedding_model = self._embedding_model
@@ -53,13 +54,14 @@ class LangChainTools():
 
         if os.path.exists(persist_directory):
             vectordb = Chroma(
-                persist_directory=persist_directory,
-                embedding_function=_embedding_model)
+                persist_directory=persist_directory, embedding_function=_embedding_model
+            )
         else:
             vectordb = Chroma.from_documents(
                 persist_directory=persist_directory,
                 documents=_docs_split,
-                embedding=_embedding_model)
+                embedding=_embedding_model,
+            )
 
         vectordb.persist()
 
@@ -74,16 +76,44 @@ class LangChainTools():
 
         # Cargamos la variable que contiene la api_key de OpenAI
         load_dotenv()
-        openai_api_key = os.getenv('api_key')
+        openai_api_key = os.getenv("api_key")
 
         temperature = 0.5
-        llm_openai = ChatOpenAI(model_name="gpt-3.5-turbo",
-                                temperature=temperature,
-                                openai_api_key=openai_api_key,
-                                max_tokens=1000)
+        llm_openai = ChatOpenAI(
+            model_name="gpt-3.5-turbo",
+            temperature=temperature,
+            openai_api_key=openai_api_key,
+            max_tokens=1000,
+        )
 
         return llm_openai
 
+    def load_llm_open_source(self):
+        """Esta funcion carga un modelo de LLM OpenSource desde HuggingFace
+
+        Returns:
+            _type_: Retorno a un objetito de tipo LLM de OpenAI
+        """
+        # model_huggingface = "google/gemma-1.1-7b-it"  # Es buena y funciona en espanol
+        # model_huggingface = (
+        #     "google/gemma-1.1-2b-it"  # Es buena y funciona en espanol funciona rapido
+        # )
+        # model_huggingface = 'tiiuae/falcon-7b-instruct'
+        # model_huggingface = 'mistralai/Mistral-7B-Instruct-v0.2'
+        huggingfacehub_api_token = "hf_..."  # token redacted; load it from the environment (.env) instead of hard-coding it
+
+        model_huggingface = "mistralai/Mixtral-8x7B-Instruct-v0.1"  # Es buena y funciona en espanol funciona rapido
+
+        # Define the LLM
+        llm = HuggingFaceEndpoint(
+            repo_id=model_huggingface,
+            huggingfacehub_api_token=huggingfacehub_api_token,
+            temperature=0.5,
+            max_new_tokens=500,
+        )
+
+        return llm
+
     def load_prompt_template(self):
         """Esta funcion construye un prompt template de lanfchain.
 
@@ -97,13 +127,12 @@ class LangChainTools():
         Respuesta:"""
 
         prompt_template = PromptTemplate(
-            template=template, input_variables=["question"])
+            template=template, input_variables=["question"]
+        )
 
         return prompt_template
 
-    def define_retrieval_qa(
-            self, _llm, _vectordb, _file_name, _embedding_model
-    ):
+    def define_retrieval_qa(self, _llm, _vectordb, _file_name, _embedding_model):
         """Esta función integra un LLM y una base de datos vectorial en una
         chain de LangChain para hacer requerimientos. Este modelo no integra memoria.
 
@@ -120,14 +149,14 @@ class LangChainTools():
             y la BDV.
         """
 
-        db_name = _file_name.replace('.pdf', '').replace(' ', '_').lower()
+        db_name = _file_name.replace(".pdf", "").replace(" ", "_").lower()
 
         # Verificamos si existe la vector strore
         persist_directory = f"embeddings/{db_name}"
 
         _vectordb = Chroma(
-            persist_directory=persist_directory,
-            embedding_function=_embedding_model)
+            persist_directory=persist_directory, embedding_function=_embedding_model
+        )
 
         # Define the Retrieval QA Chain to integrate the database and LLM
         qa = RetrievalQAWithSourcesChain.from_chain_type(
@@ -161,28 +190,35 @@ class LangChainTools():
             y la BDV.
         """
 
-        db_name = _file_name.replace('.pdf', '').replace(' ', '_').lower()
+        db_name = _file_name.replace(".pdf", "").replace(" ", "_").lower()
 
         # Verificamos si existe la vector strore
         persist_directory = f"embeddings/{db_name}"
 
         _vectordb = Chroma(
-            persist_directory=persist_directory,
-            embedding_function=_embedding_model)
+            persist_directory=persist_directory, embedding_function=_embedding_model
+        )
 
         # Configura la memoria
         memory = ConversationBufferMemory(
-            memory_key="chat_history",
-            return_messages=True,
-            output_key='answer')
+            memory_key="chat_history", return_messages=True, output_key="answer"
+        )
 
         # Define the Retrieval QA Chain to integrate the database and LLM
         conversation = ConversationalRetrievalChain.from_llm(
             _llm,
             retriever=_vectordb.as_retriever(),
             memory=memory,
-            verbose=False,  # Modo verboso
-            return_source_documents=True  # Devuelve los documentos fuente
+            verbose=True,  # Modo verboso
+            return_source_documents=True,  # Devuelve los documentos fuente
         )
 
+        template = """Utiliza los siguientes fragmentos de contexto para responder la pregunta al final. Si no sabes la respuesta, simplemente di que no sabes, no intentes inventar una respuesta. La respuesta dala con un formateo de markdown. Responde a la pregunta siempre en español.
+
+        {context}
+
+        Pregunta: {question}
+        Respuesta:"""
+        conversation.combine_docs_chain.llm_chain.prompt.template = template
+
         return conversation
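For orientation, here is a minimal usage sketch (not part of the commit) of how the new open-source LLM could be consumed by the application. Only LangChainTools, create_vector_strore, load_llm_open_source and define_retrieval_qa come from the diff above; the module name langchain_tools, the embeddings-loader method load_embedding_model, the sample PDF path, the splitter settings, and the assumption that the methods return the objects they build are all illustrative.

# Sketch under the assumptions above; expects `api_key` (OpenAI) and the
# HuggingFace token to be available via the environment / .env file.
from langchain_community.document_loaders import PyPDFLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter

from langchain_tools import LangChainTools  # module name assumed

tools = LangChainTools()

# Split a sample PDF into chunks for the vector store (path and sizes illustrative).
file_name = "documento_ejemplo.pdf"
docs_split = RecursiveCharacterTextSplitter(
    chunk_size=1000, chunk_overlap=100
).split_documents(PyPDFLoader(file_name).load())

# Embeddings still come from OpenAI; the loader method name is assumed.
embedding_model = tools.load_embedding_model()
vectordb = tools.create_vector_strore(docs_split, file_name, embedding_model)

# New in this commit: an open-source LLM served through HuggingFaceEndpoint.
llm = tools.load_llm_open_source()

# The chain builder accepts any LangChain language model, so the HuggingFace
# model is a drop-in replacement for ChatOpenAI in the existing retrieval flow.
qa = tools.define_retrieval_qa(llm, vectordb, file_name, embedding_model)
print(qa.invoke({"question": "¿De qué trata el documento?"})["answer"])

The swap works because ChatOpenAI and HuggingFaceEndpoint both expose the common LangChain language-model interface that RetrievalQAWithSourcesChain and ConversationalRetrievalChain accept.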