# LLM client loaders: OpenAI (LangChain), OpenAI o1-family, and DeepSeek.
from openai import OpenAI
|
|
from dotenv import load_dotenv
|
|
from langchain_openai import ChatOpenAI
|
|
import os
|
|
# from .cost import calculate_tokens
|
|
|
|
load_dotenv()
|
|
|
|
|
|
def load_llm_openai(
    temperature: float = 0.1, max_tokens: int = 2000, model: str = "gpt-4o-mini"
) -> ChatOpenAI:
    """Build a LangChain ``ChatOpenAI`` client with the given sampling settings.

    Args:
        temperature: Sampling temperature passed straight through to the model.
        max_tokens: Maximum number of tokens the completion may produce.
        model: OpenAI chat model name.

    Returns:
        A configured ``ChatOpenAI`` instance (API key comes from the
        environment loaded by ``load_dotenv()`` at import time).
    """
    return ChatOpenAI(temperature=temperature, max_tokens=max_tokens, model=model)
|
|
|
|
|
|
def load_llm_o1(
    prompt: str,
    model: str = "o1-mini",
    max_tokens: int = 2000,
    temperature: float = 0.1,
) -> dict:
    """Send *prompt* to an OpenAI o1-family model and return text plus token usage.

    Args:
        prompt: User message sent as a single-turn conversation.
        model: o1-family model name.
        max_tokens: Cap on generated tokens (o1 models use
            ``max_completion_tokens`` rather than ``max_tokens``).
        temperature: Kept for signature compatibility; it is intentionally
            not forwarded because o1-family models reject a custom temperature.

    Returns:
        dict with keys ``"text"`` (the completion content), ``"token_input"``
        and ``"token_output"`` (token counts reported by the API).
    """
    client = OpenAI()
    response = client.chat.completions.create(
        model=model,
        max_completion_tokens=max_tokens,
        messages=[{"role": "user", "content": prompt}],
    )

    # BUG FIX: the original called calculate_tokens(), but its import
    # ("from .cost import calculate_tokens") is commented out at the top of
    # the file, so every call raised NameError. The API response already
    # carries authoritative token counts in `usage` — use those instead.
    usage = response.usage
    return {
        "text": response.choices[0].message.content,
        "token_input": usage.prompt_tokens if usage is not None else None,
        "token_output": usage.completion_tokens if usage is not None else None,
    }
|
|
|
|
|
|
# Please install OpenAI SDK first: `pip3 install openai`
|
|
# Please install OpenAI SDK first: `pip3 install openai`
def load_llm_deepseek() -> ChatOpenAI:
    """Build a LangChain ``ChatOpenAI`` client pointed at the DeepSeek API.

    Reads ``DEEPSEEK_API_KEY`` from the environment (populated by
    ``load_dotenv()`` at import time).

    Returns:
        A ``ChatOpenAI`` instance configured for ``deepseek-chat`` against
        the DeepSeek-compatible endpoint.

    Raises:
        ValueError: If ``DEEPSEEK_API_KEY`` is not set — fail fast with a
            clear message instead of an opaque downstream client error.
    """
    api_key = os.getenv("DEEPSEEK_API_KEY")
    if not api_key:
        raise ValueError("DEEPSEEK_API_KEY environment variable is not set")

    llm = ChatOpenAI(
        model="deepseek-chat",
        # model = "deepseek-reasoner",  # reasoning model alternative
        openai_api_base="https://api.deepseek.com",
        openai_api_key=api_key,
    )
    return llm
|
|
|
|
# Example usage of the DeepSeek LLM:
|
|
|
|
# llm = load_llm_deepseek()
|
|
# response = llm.invoke("Hello, how are you?")
|
|
|
|
# print(response)
|
|
|