Integrated the logo into the page

mongar 2024-04-30 19:16:16 -05:00
parent 57f0da6a5d
commit 8883868e04
4 changed files with 26 additions and 36 deletions

app.py (62 changed lines)

@@ -1,8 +1,8 @@
-import streamlit as st
-import os
-from dotenv import load_dotenv
-from langchain_community.chat_models import ChatOpenAI
-from chats.streamlit_tools import import_file, clear_cache
+# import os
+# from dotenv import load_dotenv
+# from langchain_community.chat_models import ChatOpenAI
+import streamlit as st
+from chats.streamlit_tools import import_file  # ,clear_cache
 from streamlit_extras.add_vertical_space import add_vertical_space
 from langchain_tools.pdf_tools import PdfLangChain
 from langchain_tools.lc_tools import LangChainTools
@@ -14,31 +14,27 @@ st.set_page_config(page_title="LLMOneClusterTeam")
 # sidebar
 with st.sidebar:
     # Load the logo (make sure the image file is in the same folder as your script)
     logo_path = "documents/Logo azulblanco.png"
-    # Adjust the width as needed
     logo = st.sidebar.image(logo_path, width=200)
-    add_vertical_space(24)
+    # Adjust the width as needed
+    add_vertical_space(28)
     # pdf_name = import_file()
-    # Create a Streamlit button that calls clear_cache() when pressed
-    if st.button('Eliminar caché'):
-        clear_cache()
-    if st.button('Reiniciar'):
-        st.experimental_rerun()
-    st.markdown(
-        "Built by [OneCluster](https://www.onecluster.org/)."
-    )
+    st.markdown("Built by [OneCluster](https://www.onecluster.org/).")
+    col1, col2 = st.columns([1.1, 1])
+    with col1:
+        st.title(
+            "DocumentAssist",
+        )
+    with col2:
+        logo_2 = st.image("documents/pdfs/logo_1-removebg-preview.png", width=110)

-st.title('💬📄 LLM CHat APP')
 pdf_name = import_file()
 if pdf_name:
     with st.spinner("Processing the document..."):
         # Initialize the PdfLangChain class
         pdfLangChain = PdfLangChain(pdf_name)
         pdf_name = pdfLangChain.file_name
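For context on the layout change above: the header now uses st.columns to place the app title and a second logo side by side. A minimal standalone sketch of that pattern, assuming only the image paths and widths shown in the diff:

import streamlit as st
from streamlit_extras.add_vertical_space import add_vertical_space

st.set_page_config(page_title="LLMOneClusterTeam")

with st.sidebar:
    # Sidebar logo, then vertical padding to push the footer link down
    st.image("documents/Logo azulblanco.png", width=200)
    add_vertical_space(28)
    st.markdown("Built by [OneCluster](https://www.onecluster.org/).")

# The ratio [1.1, 1] gives the title column slightly more room than the logo
col1, col2 = st.columns([1.1, 1])
with col1:
    st.title("DocumentAssist")
with col2:
    st.image("documents/pdfs/logo_1-removebg-preview.png", width=110)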
@@ -57,9 +53,8 @@ if pdf_name:
         # Create the vector store
-        docstorage = langChainTools.create_vector_strore(
-            docs_split,
-            pdf_name,
-            embedding_model)
+        docstorage = langChainTools.create_vector_strore(
+            docs_split, pdf_name, embedding_model
+        )

         # Load the LLM from LangChain
         llm = langChainTools.load_llm_openai()
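create_vector_strore (the misspelling is the project's own method name) lives in langchain_tools/lc_tools.py, which this commit does not touch. A hypothetical sketch of what such a helper typically looks like with LangChain's FAISS wrapper; the body and the index path are assumptions, not the repository's code:

from langchain_community.vectorstores import FAISS

def create_vector_store(docs_split, pdf_name, embedding_model):
    """Embed the pre-split document chunks and index them in FAISS."""
    docstorage = FAISS.from_documents(docs_split, embedding_model)
    # pdf_name could key an on-disk cache of the index (hypothetical path)
    docstorage.save_local(f"indexes/{pdf_name}")
    return docstorage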
@@ -67,9 +62,8 @@ if pdf_name:
     # Create the chain that integrates the vector store and the LLM to answer queries.
     # In this case the chain takes the memory parameter.
     qa = langChainTools.define_retrieval_qa_memory(
-        llm, docstorage,
-        pdf_name,
-        embedding_model)
+        llm, docstorage, pdf_name, embedding_model
+    )

     # Store conversation history
     if "messages" not in st.session_state.keys():
@@ -95,7 +89,7 @@ if pdf_name:
     st.sidebar.button("Clear chat history", on_click=clear_chat_history)

-    @ st.cache_resource
+    @st.cache_resource
     def get_num_tokens(prompt):
         """Get the number of tokens in a given prompt"""
         return len(prompt.split())
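Two notes on this hunk: "@ st.cache_resource" with a space is valid Python decorator syntax, so the change is purely stylistic; and len(prompt.split()) counts whitespace-separated words, not model tokens. If an exact count mattered, a tokenizer such as tiktoken (an assumed extra dependency, not visible in this diff) could be cached the same way:

import streamlit as st
import tiktoken  # hypothetical dependency, not in this repository's diff

@st.cache_resource
def get_num_tokens_exact(prompt: str) -> int:
    """Count tokens with the cl100k_base encoding used by recent OpenAI models."""
    encoding = tiktoken.get_encoding("cl100k_base")
    return len(encoding.encode(prompt))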
@@ -111,32 +105,28 @@ if pdf_name:
     # Generate a new response if last message is not from assistant
     if st.session_state.messages[-1]["role"] != "assistant":
         with st.spinner("Thinking..."):
             # Create the chain that integrates the vector store and the LLM to answer queries.
             # In this case the chain takes the memory parameter.
-            qa = langChainTools.define_retrieval_qa_memory(
-                llm, docstorage,
-                pdf_name,
-                embedding_model)
-            input = "\n".join([msg["content"]
-                               for msg in st.session_state.messages])
-            query = qa.invoke({"question": f"{prompt}"},
-                              return_only_outputs=True)
+            qa = langChainTools.define_retrieval_qa_memory(
+                llm, docstorage, pdf_name, embedding_model
+            )
+            input = "\n".join([msg["content"] for msg in st.session_state.messages])
+            query = qa.invoke({"question": f"{prompt}"}, return_only_outputs=True)
             response_text = query["answer"]
             documents_source = query["source_documents"]

             messageManager = MessageManager()
-            citation: str = messageManager.generate_citations(
-                documents_source)
+            citation: str = messageManager.generate_citations(documents_source)
             # st.markdown(citation)

     with st.chat_message("assistant"):
         st.write(response_text)
-        st.session_state.messages.append(
-            {"role": "assistant", "content": response_text})
+        st.session_state.messages.append(
+            {"role": "assistant", "content": response_text}
+        )
     expander = st.expander("Fuentes")
     expander.markdown(citation)
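The append at the end of this hunk is what makes the conversation persist: st.session_state survives Streamlit's top-to-bottom reruns. A minimal sketch of the chat pattern the app follows (greeting text is an assumption):

import streamlit as st

if "messages" not in st.session_state:
    st.session_state.messages = [
        {"role": "assistant", "content": "Ask me something about the document."}
    ]

for msg in st.session_state.messages:  # replay the history on every rerun
    with st.chat_message(msg["role"]):
        st.write(msg["content"])

if prompt := st.chat_input():  # a new user turn triggers a rerun
    st.session_state.messages.append({"role": "user", "content": prompt})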

New image file (199 KiB; binary not shown)

documents/pdfs/logo_1.jpeg (new image file, 107 KiB; binary not shown)