feat: add agents/app
0   agents/app/langchain_tools/__init__.py   Normal file
0   agents/app/langchain_tools/chains.py   Normal file
55   agents/app/langchain_tools/cost.py   Normal file
@@ -0,0 +1,55 @@
import tiktoken


# Model costs
LLM_COSTS = {
    "o1-mini": {
        "input": 3.00 / 1_000_000,  # $3.00 per 1M input tokens
        "output": 12.00 / 1_000_000,  # $12.00 per 1M output tokens
    },
    "gpt-4o-mini": {
        "input": 0.150 / 1_000_000,  # $0.150 per 1M input tokens
        "output": 0.600 / 1_000_000,  # $0.600 per 1M output tokens
    },
}


def calculate_total_cost(model, input_tokens, output_tokens):
    """
    Calculate the total cost of using a language model based on token usage.

    Parameters:
        model (str): The model's name (e.g., "gpt-4o-mini").
        input_tokens (int): The number of input tokens used.
        output_tokens (int): The number of output tokens generated.

    Returns:
        float: The total cost in USD.

    Raises:
        ValueError: If the model name is not found in LLM_COSTS.
    """
    if model not in LLM_COSTS:
        raise ValueError(f"Model '{model}' not found in LLM_COSTS.")

    # Get per-token costs for the specified model
    input_cost_per_token = LLM_COSTS[model]["input"]
    output_cost_per_token = LLM_COSTS[model]["output"]

    # Calculate total cost for input and output tokens
    total_input_cost = input_tokens * input_cost_per_token
    total_output_cost = output_tokens * output_cost_per_token

    # Combine the costs
    total_cost = total_input_cost + total_output_cost

    return total_cost


def calculate_tokens(text):
    # o200k_base is the encoding used by the gpt-4o and o1 model families
    encoding = tiktoken.get_encoding("o200k_base")

    # Calculate the number of tokens
    num_tokens = len(encoding.encode(text))

    return num_tokens
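For context, a minimal usage sketch of the two helpers above (not part of the commit; the example strings are hypothetical):

from agents.app.langchain_tools.cost import calculate_tokens, calculate_total_cost

prompt = "Summarize the quarterly report."
completion = "Revenue grew 12% quarter over quarter."

# Token counts come from the same o200k_base encoding the module uses
input_tokens = calculate_tokens(prompt)
output_tokens = calculate_tokens(completion)

# For short strings like these the total is a tiny fraction of a cent
cost = calculate_total_cost("gpt-4o-mini", input_tokens, output_tokens)
print(f"Estimated cost: ${cost:.8f}")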
56   agents/app/langchain_tools/llm.py   Normal file
@@ -0,0 +1,56 @@
from openai import OpenAI
from dotenv import load_dotenv
from langchain_openai import ChatOpenAI
import os
from .cost import calculate_tokens  # needed by load_llm_o1 below

load_dotenv()


def load_llm_openai(
    temperature: float = 0.1, max_tokens: int = 2000, model: str = "gpt-4o-mini"
) -> ChatOpenAI:
    llm = ChatOpenAI(temperature=temperature, max_tokens=max_tokens, model=model)
    return llm


def load_llm_o1(
    prompt: str,
    model: str = "o1-mini",
    max_tokens: int = 2000,
    temperature: float = 0.1,  # unused: o1 models only accept the default temperature
) -> dict:
    client = OpenAI()
    response = client.chat.completions.create(
        model=model,
        max_completion_tokens=max_tokens,
        messages=[{"role": "user", "content": prompt}],
    )

    result = {
        "text": response.choices[0].message.content,
        "token_input": calculate_tokens(prompt),
        "token_output": calculate_tokens(response.choices[0].message.content),
    }
    return result


# Please install the OpenAI SDK first: `pip3 install openai`
def load_llm_deepseek():
    api_key = os.getenv("DEEPSEEK_API_KEY")

    llm = ChatOpenAI(
        model="deepseek-chat",
        # model="deepseek-reasoner",
        openai_api_base="https://api.deepseek.com",
        openai_api_key=api_key,
    )
    return llm


# Test the DeepSeek LLM:
# llm = load_llm_deepseek()
# response = llm.invoke("Hello, how are you?")
# print(response)
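And a corresponding sketch for the loaders above (not part of the commit), assuming OPENAI_API_KEY (and DEEPSEEK_API_KEY for the last loader) are set in the .env file:

# load_llm_openai returns a LangChain ChatOpenAI instance
llm = load_llm_openai(temperature=0.2)
print(llm.invoke("Say hi in five words.").content)

# load_llm_o1 calls the OpenAI SDK directly and reports token usage
result = load_llm_o1("Explain tokenization in one sentence.")
print(result["text"], result["token_input"], result["token_output"])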