diff --git a/app.py b/app.py
index e161eb3..d1038f7 100644
--- a/app.py
+++ b/app.py
@@ -1,8 +1,8 @@
+# import os
+# from dotenv import load_dotenv
+# from langchain_community.chat_models import ChatOpenAI
 import streamlit as st
-import os
-from dotenv import load_dotenv
-from langchain_community.chat_models import ChatOpenAI
-from chats.streamlit_tools import import_file, clear_cache
+from chats.streamlit_tools import import_file  # , clear_cache
 from streamlit_extras.add_vertical_space import add_vertical_space
 from langchain_tools.pdf_tools import PdfLangChain
 from langchain_tools.lc_tools import LangChainTools
@@ -14,31 +14,27 @@ st.set_page_config(page_title="LLMOneClusterTeam")
 
 # sidebar
 with st.sidebar:
-    # Load the logo (make sure the image file is in the same folder as the script)
     logo_path = "documents/Logo azulblanco.png"
-    # Adjust the width as needed
     logo = st.sidebar.image(logo_path, width=200)
-    add_vertical_space(24)
+    # Adjust the width as needed
+    add_vertical_space(28)
     # pdf_name = import_file()
+    st.markdown("Built by [OneCluster](https://www.onecluster.org/).")
 
-    # Create a Streamlit button that calls clear_cache() when pressed
-    if st.button('Eliminar caché'):
-        clear_cache()
-    if st.button('Reiniciar'):
-        st.experimental_rerun()
-    st.markdown(
-        "Built by [OneCluster](https://www.onecluster.org/)."
+
+col1, col2 = st.columns([1.1, 1])
+with col1:
+    st.title(
+        "DocumentAssist",
     )
-
-st.title('💬📄 LLM CHat APP')
+with col2:
+    logo_2 = st.image("documents/pdfs/logo_1-removebg-preview.png", width=110)
 
 pdf_name = import_file()
 
 if pdf_name:
-    with st.spinner("Processing the document..."):
-        # Initialize the PdfLangChain class
     pdfLangChain = PdfLangChain(pdf_name)
     pdf_name = pdfLangChain.file_name
@@ -57,9 +53,8 @@ if pdf_name:
 
     # Create the vector store
     docstorage = langChainTools.create_vector_strore(
-        docs_split,
-        pdf_name,
-        embedding_model)
+        docs_split, pdf_name, embedding_model
+    )
 
     # Load the LLM from LangChain
     llm = langChainTools.load_llm_openai()
@@ -67,9 +62,8 @@ if pdf_name:
     # Create the chain that integrates the vector store and the LLM for queries.
     # In this case the chain has the memory parameter.
     qa = langChainTools.define_retrieval_qa_memory(
-        llm, docstorage,
-        pdf_name,
-        embedding_model)
+        llm, docstorage, pdf_name, embedding_model
+    )
 
     # Store conversation history
     if "messages" not in st.session_state.keys():
@@ -95,7 +89,7 @@ if pdf_name:
 
     st.sidebar.button("Clear chat history", on_click=clear_chat_history)
 
-    @ st.cache_resource
+    @st.cache_resource
     def get_num_tokens(prompt):
         """Get the number of tokens in a given prompt"""
         return len(prompt.split())
@@ -111,32 +105,28 @@ if pdf_name:
     # Generate a new response if last message is not from assistant
     if st.session_state.messages[-1]["role"] != "assistant":
         with st.spinner("Thinking..."):
-            # Create the chain that integrates the vector store and the LLM for queries.
             # In this case the chain has the memory parameter.
             qa = langChainTools.define_retrieval_qa_memory(
-                llm, docstorage,
-                pdf_name,
-                embedding_model)
+                llm, docstorage, pdf_name, embedding_model
+            )
 
-            input = "\n".join([msg["content"]
-                               for msg in st.session_state.messages])
+            input = "\n".join([msg["content"] for msg in st.session_state.messages])
 
-            query = qa.invoke({"question": f"{prompt}"},
-                              return_only_outputs=True)
+            query = qa.invoke({"question": f"{prompt}"}, return_only_outputs=True)
 
             response_text = query["answer"]
             documents_source = query["source_documents"]
 
             messageManager = MessageManager()
 
-            citation: str = messageManager.generate_citations(
-                documents_source)
+            citation: str = messageManager.generate_citations(documents_source)
             # st.markdown(citation)
 
         with st.chat_message("assistant"):
             st.write(response_text)
         st.session_state.messages.append(
-            {"role": "assistant", "content": response_text})
+            {"role": "assistant", "content": response_text}
+        )
 
         expander = st.expander("Fuentes")
         expander.markdown(citation)
diff --git a/chats/__pycache__/chat_tools.cpython-311.pyc b/chats/__pycache__/chat_tools.cpython-311.pyc
index 82c7cc3..6fe77b5 100644
Binary files a/chats/__pycache__/chat_tools.cpython-311.pyc and b/chats/__pycache__/chat_tools.cpython-311.pyc differ
diff --git a/documents/pdfs/logo_1-removebg-preview.png b/documents/pdfs/logo_1-removebg-preview.png
new file mode 100644
index 0000000..8e14692
Binary files /dev/null and b/documents/pdfs/logo_1-removebg-preview.png differ
diff --git a/documents/pdfs/logo_1.jpeg b/documents/pdfs/logo_1.jpeg
new file mode 100644
index 0000000..403a0b5
Binary files /dev/null and b/documents/pdfs/logo_1.jpeg differ
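Note on the removed sidebar controls: `clear_cache` is no longer imported, so the "Eliminar caché" / "Reiniciar" buttons dropped in the first hunk currently have no replacement. If they are ever restored, Streamlit's public cache APIs cover the same ground without the custom `chats.streamlit_tools.clear_cache` helper. A minimal sketch, assuming Streamlit >= 1.27 (where `st.rerun` replaced the now-deprecated `st.experimental_rerun`):

```python
import streamlit as st

# Hypothetical replacement for the removed sidebar buttons; not part of this diff.
with st.sidebar:
    if st.button("Eliminar caché"):
        st.cache_data.clear()      # clear all @st.cache_data entries
        st.cache_resource.clear()  # clear all @st.cache_resource entries (LLMs, vector stores)
    if st.button("Reiniciar"):
        st.session_state.clear()   # drop chat history and other session keys
        st.rerun()                 # rerun the script from the top
```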