from dotenv import load_dotenv
from langchain_openai import ChatOpenAI
from langchain_core.tools import tool
from datetime import datetime, timezone
from googleapiclient.discovery import build
from app.rag.split_docs import load_split_docs
from app.rag.llm import load_llm_openai
from app.rag.embeddings import load_embeddins
from app.rag.retriever import create_retriever
from app.rag.vectorstore import create_vectorstore
from app.rag.rag_chain import create_rag_chain
import pytz
import telebot
import os


class LangChainTools:

    def load_llm_openai(self):
        """Load the OpenAI chat model used by the tools in this module."""
        load_dotenv()
        # model = "gpt-3.5-turbo-0125"
        # model = "gpt-4o"
        model = "gpt-4o-mini"

        llm = ChatOpenAI(
            model=model,
            temperature=0.1,
            max_tokens=2000,
        )
        return llm


@tool
def redact_email(topic: str) -> str:
    """Use this tool to draft the content of an email based on a topic."""
    # Load the LLM model
    langChainTools = LangChainTools()
    llm = langChainTools.load_llm_openai()

    # Build the prompt for the LLM
    prompt = (
        "Please draft an email based on the following topic:\n\n"
        "Topic: {}\n\n"
        "Email Content: [Your email content here]"
    ).format(topic)

    response = llm.invoke(prompt)
    # Return only the message text, since the tool is declared to return a str
    return response.content


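# Illustrative note (an assumption, not part of the original module): because
# redact_email is decorated with @tool, it becomes a LangChain StructuredTool
# and can be exercised directly during testing. The sample topic below is
# hypothetical:
#
#     draft = redact_email.invoke({"topic": "follow-up after the demo call"})
#     print(draft)

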
@tool
def send_message(message: str):
    """Use this function when you need to communicate with Cristian."""
    # Bot configuration
    load_dotenv()
    API_TOKEN_BOT = os.getenv("API_TOKEN_BOT")
    bot = telebot.TeleBot(API_TOKEN_BOT)

    # Escape Markdown special characters
    from telebot.util import escape_markdown

    safe_message = escape_markdown(message)

    # Send the escaped message using Markdown parsing
    bot.send_message(
        chat_id="5076346205",
        text=safe_message,
        parse_mode="Markdown",
    )


@tool
def get_company_info(prompt: str) -> str:
    """
    Use this function when you need more information
    about the services offered by OneCluster.
    """
    file_path: str = "onecluster_info.pdf"

    try:
        docs_split: list = load_split_docs(file_path)
        embeddings_model = load_embeddins()
        llm = load_llm_openai()

        # Build the vector store for the company document
        create_vectorstore(docs_split, embeddings_model, file_path)

        retriever = create_retriever(
            embeddings_model, persist_directory="embeddings/onecluster_info"
        )
        qa = create_rag_chain(llm, retriever)

        response = qa.invoke({"input": prompt, "chat_history": []})

        return response["answer"]
    except Exception as e:
        print(f"Error in get_company_info: {e}")
        return f"Sorry, there was an error while processing the information: {str(e)}"


@tool
def get_current_date_and_time():
    """
    Use this function when you need to know the current date and time.

    Returns:
        str: Current date and time in Bogotá, Colombia.
    """
    bogota_tz = pytz.timezone("America/Bogota")
    current_date_and_time = datetime.now(bogota_tz)
    return current_date_and_time.strftime("%Y-%m-%d %H:%M:%S")
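

# Minimal usage sketch (an assumption, not part of the original file): it shows
# how these tools could be handed to the chat model so it can decide when to
# call them. It assumes a langchain-core/langchain-openai version that supports
# ChatOpenAI.bind_tools and AIMessage.tool_calls; the sample question is
# hypothetical.
if __name__ == "__main__":
    tools = [
        redact_email,
        send_message,
        get_company_info,
        get_current_date_and_time,
    ]

    llm_with_tools = LangChainTools().load_llm_openai().bind_tools(tools)

    # The model replies with an AIMessage; tool_calls lists the tools it
    # decided to invoke (for this question, likely get_current_date_and_time).
    ai_message = llm_with_tools.invoke("What time is it in Bogotá right now?")
    print(ai_message.tool_calls)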