Initial cleanup of the project

Mongar28 2025-03-25 17:50:18 -05:00
parent f397b2d446
commit 61da645cf9
14 changed files with 0 additions and 344 deletions

Binary file not shown.

Binary file not shown.


@@ -133,7 +133,6 @@ def search_sale_order(order_id: int):
    return response_sale


@tool
def add_lines_to_order(
    order_id: int, product: str, unit: str, quantity: str, unitprice: str
@@ -210,36 +209,3 @@ tools = [
    add_lines_to_order,
    search_associate_party_to_contact_mechanism,
]
# if __name__ == "__main__":
#     # Create a sale order
#     party = 2573
#     pickup_location = "at_home"
#     order_id = create_sale_order(party=party, pickup_location=pickup_location)
#     print(f"\nOrder created with ID: {order_id}")
#
#     # Add lines to the order
#     product = "1"
#     unit = "1"
#     quantity = "3"
#     unitprice = "15"
#     add_line_response = add_lines_to_order(order_id, product, unit, quantity, unitprice)
#     print(f"\nResponse after adding a line: {add_line_response.text}")
#
#     # Check the updated order
#     updated_order = search_sale_order(order_id)
#     print(f"\nUpdated order: {updated_order.text}")
#
#     # Add another line to the order
#     product = "2"
#     unit = "1"
#     quantity = "3"
#     unitprice = "15"
#     add_line_response = add_lines_to_order(order_id, product, unit, quantity, unitprice)
#     print(f"\nResponse after adding a line: {add_line_response.text}")
#
#     # Check the updated order
#     updated_order = search_sale_order(order_id)
#     print(f"\nUpdated order: {updated_order.text}")

Binary file not shown.


@@ -1,82 +0,0 @@
from seller.catalog_tools import CatalogTrytonTools
import json

# result = json.dumps(response, indent=4)

url = "http://192.168.0.25:8000"
key = "9a9ffc430146447d81e6698240199a4be2b0e774cb18474999d0f60e33b5b1eb1cfff9d9141346a98844879b5a9e787489c891ddc8fb45cc903b7244cab64fb1"
db = "tryton"
application_name = "sale_don_confiao"

catalog = CatalogTrytonTools(url, application_name, key, db)

# List the products
print("=== Products ===")
response = catalog.list_products()
products: str = ""
for product in response:
    id = product["id"]
    name = product["name"]
    id_unit_measurement = product["default_uom."]["id"]
    products += f"- id: {id} - Name: {name} - Unit of Measure ID: {id_unit_measurement}\n"
print(products)

# Product prices
print("=== Product prices ===")
response = catalog.search_products("Arroz")
precios: str = ""
for product in response:
    id = product["id"]
    name = product["name"]
    precio = product["template."]["list_price"]["decimal"]
    id_unit_measurement = product["default_uom."]["id"]
    precios += f"- id: {id} - Name: {name}: ${precio} - Unit of Measure ID: {id_unit_measurement}\n"
print(precios)
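# A note on the trailing-dot keys above (inferred from this code, not from
# documentation): the Tryton backend appears to return related records under
# dotted field names, so product["default_uom."] holds the full
# unit-of-measure record and product["template."] the product template with
# its list_price.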
# Check product availability
# print("=== Product availability ===")
# response = catalog.search_products("Papa")
# disponibilidad: str = ""
# for product in response:
#     id = product["id"]
#     name = product["name"]
#     stock = product["quantity"]
#     disponibilidad += f"- id: {id} - Name: {name} - Stock: {stock}\n"
# print(disponibilidad)

# from langgraph_tools.tools.catalog.catalog_tools import (
#     list_products,
#     search_products,
#     check_price,
# )


# def test_catalog_functions():
#     print("=== Testing product listing ===")
#     print(list_products())
#     print("\n")
#
#     print("=== Testing product search ===")
#     print(search_products("Arroz"))
#     print("\n")
#
#     print("=== Testing price check ===")
#     print(check_price("Arroz"))
#     print("\n")


# if __name__ == "__main__":
#     test_catalog_functions()


@@ -1,20 +0,0 @@
# from langchain_tools.llm import load_llm_openai
# import yaml
#
# with open("langgraph_tools/prompts.yaml", "r") as f:
# PROMPTS = yaml.safe_load(f)
#
#
# llm = load_llm_openai()
# query = "Necesito informacion sobre la tienda."
# prompt = PROMPTS["classifier"]["system"].format(query=query)
#
# response: dict = llm.invoke(prompt)
#
# print(response.content)

from langgraph_tools.tools.general_info import get_link_page

link = get_link_page()
print(link)


@@ -1,10 +0,0 @@
from dotenv import load_dotenv
from langchain_openai import OpenAIEmbeddings


def load_embeddins():
    load_dotenv()
    # model = "text-embedding-ada-002"
    model = "text-embedding-3-small"
    return OpenAIEmbeddings(model=model)
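# Minimal usage sketch (assumes OPENAI_API_KEY is defined in the local .env
# that load_dotenv() reads):
# embeddings = load_embeddins()
# vectors = embeddings.embed_documents(["sample text"])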


@@ -1,17 +0,0 @@
from dotenv import load_dotenv
from langchain_openai import ChatOpenAI


def load_llm_openai():
    load_dotenv()
    # model = "gpt-3.5-turbo-0125"
    # model = "gpt-4o"
    model = "gpt-4o-mini"
    llm = ChatOpenAI(
        model=model,
        temperature=0.1,
        max_tokens=2000,
    )
    return llm
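# Minimal usage sketch (assumes OPENAI_API_KEY in .env); temperature=0.1
# keeps the assistant's replies close to deterministic:
# llm = load_llm_openai()
# print(llm.invoke("Hello").content)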


@@ -1,46 +0,0 @@
from langchain.chains import create_history_aware_retriever
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain.chains import create_retrieval_chain
from langchain.chains.combine_documents import create_stuff_documents_chain


def create_rag_chain(llm, retriever):
    contextualize_q_system_prompt = """
    Given a chat history and the latest user question \
    which might reference context in the chat history, \
    formulate a standalone question \
    which can be understood without the chat history.
    Do NOT answer the question, \
    just reformulate it if needed and otherwise return it as is.
    """
    contextualize_q_prompt = ChatPromptTemplate.from_messages(
        [
            ("system", contextualize_q_system_prompt),
            MessagesPlaceholder("chat_history"),
            ("human", "{input}"),
        ]
    )
    history_aware_retriever = create_history_aware_retriever(
        llm, retriever, contextualize_q_prompt
    )

    # ----------------- Chain with the chat history -----------------
    qa_system_prompt = """
    You are an assistant for question-answering tasks. \
    Use the following pieces of retrieved context to answer the question. \
    If you don't know the answer, just say that you don't know. \
    Answer at whatever length the question requires; \
    do not artificially limit the response.
    {context}"""
    qa_prompt = ChatPromptTemplate.from_messages(
        [
            ("system", qa_system_prompt),
            MessagesPlaceholder("chat_history"),
            ("human", "{input}"),
        ]
    )
    question_answer_chain = create_stuff_documents_chain(llm, qa_prompt)
    return create_retrieval_chain(
        history_aware_retriever, question_answer_chain)
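# How the pieces fit together: create_history_aware_retriever first rewrites
# the latest question into a standalone one using the chat history, the
# retriever then fetches the matching chunks, and create_stuff_documents_chain
# inserts them into {context} before the QA prompt reaches the LLM.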


@@ -1,18 +0,0 @@
from langchain_chroma import Chroma


def create_retriever(embeddings, persist_directory: str):
    # Load the vectorstore.
    # vectordb = Chroma.from_documents(
    #     persist_directory=st.session_state.persist_directory,
    #     # This is the vectorstore directory for the user's document,
    #     # which is kept in the session_state.
    #     embedding_function=embeddings,
    # )
    vectordb = Chroma(
        persist_directory=persist_directory,
        embedding_function=embeddings,
    )
    # Create the retriever so it returns the most relevant fragments.
    return vectordb.as_retriever(search_kwargs={"k": 6})
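# Minimal usage sketch (the directory name is a made-up example):
# retriever = create_retriever(embeddings, "embeddings/store_docs")
# docs = retriever.invoke("store opening hours")
# search_kwargs={"k": 6} caps each query at the six closest fragments.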


@@ -1,42 +0,0 @@
from langchain_community.document_loaders.pdf import PyPDFLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
import os

# def load_split_docs(file_name: str) -> list:
#     file_path: str = os.path.join("documents", "pdfs", file_name)
#     loader = PyPDFLoader(file_path)
#     docs: list = loader.load()
#     chunk_size: int = 2000
#     chunk_overlap: int = 300
#
#     splitter = RecursiveCharacterTextSplitter(
#         chunk_size=chunk_size, chunk_overlap=chunk_overlap
#     )
#     docs_split: list = splitter.split_documents(docs)
#
#     return docs_split


def load_split_docs(file_name: str) -> list:
    # Get the project's base directory.
    base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    # Build the absolute path to the PDF.
    file_path = os.path.join(base_dir, "documents", "pdfs", file_name)
    # Check that the file exists.
    if not os.path.exists(file_path):
        print(f"File not found at: {file_path}")
        raise FileNotFoundError(f"File not found at: {file_path}")
    loader = PyPDFLoader(file_path)
    docs: list = loader.load()
    chunk_size: int = 2000
    chunk_overlap: int = 300
    splitter = RecursiveCharacterTextSplitter(
        chunk_size=chunk_size, chunk_overlap=chunk_overlap
    )
    docs_split: list = splitter.split_documents(docs)
    return docs_split
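# With chunk_size=2000 and chunk_overlap=300, the last 300 characters of each
# chunk are repeated at the start of the next, so sentences that straddle a
# boundary still appear whole in at least one chunk.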


@@ -1,48 +0,0 @@
from langchain_chroma import Chroma
import os

# def create_vectorstore(docs_split: list, embeddings, file_name: str):
#     db_name: str = file_name.replace(".pdf", "").replace(" ", "_").lower()
#     persist_directory: str = f"embeddings/{db_name}"
#
#     # Create the directory if it does not exist
#     os.makedirs(persist_directory, exist_ok=True)
#
#     # Always create/update the vectorstore
#     vectordb = Chroma.from_documents(
#         persist_directory=persist_directory,
#         documents=docs_split,
#         embedding=embeddings,
#     )
#
#     return vectordb


def create_vectorstore(docs_split: list, embeddings, file_name: str):
    # Get the project's base directory.
    base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    # Derive the database name from the file name.
    db_name: str = file_name.replace(".pdf", "").replace(" ", "_").lower()
    # Build the absolute path for the embeddings.
    persist_directory: str = os.path.join(base_dir, "embeddings", db_name)
    # Create the directory if it does not exist.
    os.makedirs(persist_directory, exist_ok=True)
    # Debug log.
    print(f"Creating vectorstore at: {persist_directory}")
    try:
        # Create/update the vectorstore.
        vectordb = Chroma.from_documents(
            persist_directory=persist_directory,
            documents=docs_split,
            embedding=embeddings,
        )
        return vectordb
    except Exception as e:
        print(f"Error creating vectorstore: {e}")
        raise
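# End-to-end sketch (the PDF name is a made-up example):
# docs = load_split_docs("store_manual.pdf")
# vectordb = create_vectorstore(docs, embeddings, "store_manual.pdf")
# Chroma.from_documents both builds the index and persists it on disk under
# embeddings/store_manual, so later runs can reload it via create_retriever.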


@@ -1,27 +0,0 @@
#!/usr/bin/env python3
from app.langgraph_tools.tools.orders.order_tools_2 import create_party
import json


# Test for the create_party function
def test_create_party():
    # Sample parameters
    party_full_name = "Cristian Garces"
    contact_method_type = "email"
    contact_method_value = "cristian.garces@example.com"

    # Call the function
    response = create_party(
        party_full_name=party_full_name,
        contact_method_type=contact_method_type,
        contact_method_value=contact_method_value
    )

    # Print the response
    print("Status Code:", response.status_code)
    try:
        print("Response:", json.loads(response.text))
    except json.JSONDecodeError:
        print("Raw Response:", response.text)


if __name__ == "__main__":
    test_create_party()
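# create_party evidently returns a requests-style response object (it exposes
# .status_code and .text), so the JSONDecodeError guard above covers backends
# that reply with non-JSON error bodies.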