200 lines
6.6 KiB
Python
200 lines
6.6 KiB
Python
from typing import TypedDict, Annotated, List

import yaml
from dotenv import load_dotenv
from langchain_core.messages import BaseMessage, HumanMessage, AIMessage
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langgraph.graph.message import add_messages
from langgraph.prebuilt import create_react_agent

from app.langchain_tools.llm import load_llm_openai

from .tools.general_info import tools as general_info_tools
# from .tools.catalog.catalog_tools import tools as catalog_tools
from .tools.catalog.catalog_tools import tools as catalog_tools
from .tools.orders.order_tools import tools as order_tools
from .tools.orders.order_tools_2 import tools as order_tools_2
|
|
|
|
load_dotenv()

# Load every agent's system prompt once at import time.
# Explicit UTF-8: the prompts file contains Spanish text (accents, "ñ"),
# and the platform-default encoding is not guaranteed to handle it.
with open("app/langgraph_tools/prompts.yaml", "r", encoding="utf-8") as f:
    PROMPTS = yaml.safe_load(f)
|
|
|
|
|
|
class ChatBotState(TypedDict):
    """Shared graph state passed between the chatbot nodes."""

    # Conversation history. The metadata must be the *callable*
    # `add_messages` reducer, not the string "add_messages": LangGraph only
    # applies a reducer when the Annotated metadata is callable, so the
    # string form silently caused each node to overwrite the history
    # instead of appending to it.
    messages: Annotated[List[BaseMessage], add_messages]
    # Latest raw user input being processed.
    query: str
    # Routing label chosen by the classifier: "general_info" | "catalog" | "order".
    category: str
    # Final text answer produced by the selected agent.
    response: str
    # User phone number, interpolated into the agents' system prompts.
    phone: str
|
|
|
|
|
|
def classifier_agent(state: ChatBotState) -> ChatBotState:
    """Route the incoming query to one of the specialist agents.

    Asks the LLM for a category label; when the model answers something
    outside the known set, falls back to keyword heuristics on the query.
    """
    llm = load_llm_openai()

    # The system prompt already interpolates {query}; the placeholder
    # carries the running conversation.
    classification_prompt = ChatPromptTemplate.from_messages(
        [
            ("system", PROMPTS["classifier"]["system"]),
            MessagesPlaceholder(variable_name="messages"),
        ]
    )

    # Conversation so far plus the current user turn.
    history_with_query = [*state["messages"], HumanMessage(content=state["query"])]

    llm_reply = llm.invoke(
        classification_prompt.invoke(
            {
                "messages": history_with_query,
                "query": state["query"],  # required by the {query} slot in the prompt
            }
        )
    )

    category = llm_reply.content.strip().lower()

    if category not in {"general_info", "catalog", "order"}:
        # Address/confirmation vocabulary strongly suggests an in-progress order.
        order_hints = (
            "calle",
            "carrera",
            "avenida",
            "dirección",
            "confirmar",
            "pedido",
        )
        lowered_query = state["query"].lower()
        if any(hint in lowered_query for hint in order_hints):
            category = "order"
        else:
            category = "catalog"  # safest default category

    return {**state, "category": category, "messages": state["messages"]}
|
|
|
|
|
|
def general_info_agent(state: ChatBotState) -> ChatBotState:
    """Answer general-information questions with a ReAct agent.

    Builds a ReAct agent over the general-info tools using the
    ``general_info`` system prompt (personalized with the user's phone),
    runs it on the conversation plus the current query, and stores the
    agent's final message in ``response``. On failure, an apologetic
    message is appended instead of crashing the graph.
    """
    try:
        llm = load_llm_openai()

        # System prompt personalized with the user's phone number; the
        # placeholder injects the running conversation.
        prompt = ChatPromptTemplate.from_messages(
            [
                (
                    "system",
                    PROMPTS["general_info"]["system"].format(telefono=state["phone"]),
                ),
                MessagesPlaceholder(variable_name="messages"),
            ]
        )

        agent = create_react_agent(
            model=llm, tools=general_info_tools, state_modifier=prompt
        )

        # Append the current query so the agent sees it as the latest turn.
        current_messages = state["messages"] + [HumanMessage(content=state["query"])]

        response = agent.invoke({"messages": current_messages})

        # Spread **state so query/category/phone survive this node —
        # consistent with order_agent, which already preserves them.
        return {
            **state,
            "messages": response["messages"],
            "response": response["messages"][-1].content,
        }

    except Exception as e:
        # Surface the failure to the user as a normal assistant message.
        error_message = f"Lo siento, hubo un error: {str(e)}"
        return {
            **state,
            "messages": state["messages"] + [AIMessage(content=error_message)],
            "response": error_message,
        }
|
|
|
|
|
|
def catalog_agent(state: ChatBotState) -> ChatBotState:
    """Answer catalog/product questions with a ReAct agent.

    Builds a ReAct agent over the catalog tools using the ``catalog``
    system prompt (personalized with the user's phone), runs it on the
    conversation plus the current query, and stores the agent's final
    message in ``response``. On failure, an apologetic message is
    appended instead of crashing the graph.
    """
    try:
        llm = load_llm_openai()

        # System prompt personalized with the user's phone number; the
        # placeholder injects the running conversation.
        prompt = ChatPromptTemplate.from_messages(
            [
                (
                    "system",
                    PROMPTS["catalog"]["system"].format(telefono=state["phone"]),
                ),
                MessagesPlaceholder(variable_name="messages"),
            ]
        )

        agent = create_react_agent(
            model=llm, tools=catalog_tools, state_modifier=prompt
        )

        # Append the current query so the agent sees it as the latest turn.
        current_messages = state["messages"] + [HumanMessage(content=state["query"])]

        response = agent.invoke({"messages": current_messages})

        # Spread **state so query/category/phone survive this node —
        # consistent with order_agent, which already preserves them.
        return {
            **state,
            "messages": response["messages"],
            "response": response["messages"][-1].content,
        }

    except Exception as e:
        # Surface the failure to the user as a normal assistant message.
        error_message = f"Lo siento, hubo un error en el catálogo: {str(e)}"
        return {
            **state,
            "messages": state["messages"] + [AIMessage(content=error_message)],
            "response": error_message,
        }
|
|
|
|
|
|
def order_agent(state: ChatBotState) -> ChatBotState:
    """Handle order-related queries with a ReAct agent over the order tools."""
    try:
        llm = load_llm_openai()

        # System prompt personalized with the user's phone number.
        system_text = PROMPTS["order"]["system"].format(telefono=state["phone"])
        prompt = ChatPromptTemplate.from_messages(
            [
                ("system", system_text),
                MessagesPlaceholder(variable_name="messages"),
            ]
        )

        # Second-generation order toolset is the active one.
        # agent = create_react_agent(model=llm, tools=order_tools, state_modifier=prompt)
        agent = create_react_agent(
            model=llm, tools=order_tools_2, state_modifier=prompt
        )

        # Run the agent over the history plus the current user turn.
        conversation = [*state["messages"], HumanMessage(content=state["query"])]
        result = agent.invoke({"messages": conversation})

        final_answer = result["messages"][-1].content
        # Keep every other state field intact; only refresh the conversation
        # and the response produced by the agent.
        return {**state, "messages": result["messages"], "response": final_answer}

    except Exception as e:
        # Report the failure to the user as a normal assistant message.
        error_message = f"Lo siento, hubo un error en el manejo de la orden: {str(e)}"
        return {
            **state,
            "messages": [*state["messages"], AIMessage(content=error_message)],
            "response": error_message,
        }