import tiktoken

# Model costs (USD per token)
LLM_COSTS = {
    "o1-mini": {
        "input": 3.00 / 1_000_000,    # $3.00 per 1M input tokens
        "output": 12.00 / 1_000_000,  # $12.00 per 1M output tokens
    },
    "gpt-4o-mini": {
        "input": 0.150 / 1_000_000,   # $0.150 per 1M input tokens
        "output": 0.600 / 1_000_000,  # $0.600 per 1M output tokens
    },
}
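
# To price another model, add an entry with its per-token rates. Sketch with
# hypothetical numbers -- check your provider's current price list:
#   LLM_COSTS["my-model"] = {"input": 1.00 / 1_000_000, "output": 4.00 / 1_000_000}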


def calculate_total_cost(model, input_tokens, output_tokens):
    """
    Calculate the total cost of using a language model based on token usage.

    Parameters:
        model (str): The model's name (e.g., "gpt-4o-mini").
        input_tokens (int): The number of input tokens used.
        output_tokens (int): The number of output tokens generated.

    Returns:
        float: The total cost in USD.

    Raises:
        ValueError: If the model name is not found in LLM_COSTS.
    """
    if model not in LLM_COSTS:
        raise ValueError(f"Model '{model}' not found in LLM_COSTS.")

    # Get per-token costs for the specified model
    input_cost_per_token = LLM_COSTS[model]["input"]
    output_cost_per_token = LLM_COSTS[model]["output"]

    # Calculate total cost for input and output tokens
    total_input_cost = input_tokens * input_cost_per_token
    total_output_cost = output_tokens * output_cost_per_token

    # Combine the costs
    total_cost = total_input_cost + total_output_cost

    return total_cost
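
# Worked example (using the rates in LLM_COSTS above; treat the figure as
# illustrative, since provider pricing changes):
#   calculate_total_cost("gpt-4o-mini", 1_000, 500)
#   = 1_000 * 0.150 / 1_000_000 + 500 * 0.600 / 1_000_000
#   = 0.00015 + 0.00030
#   = 0.00045 USD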


def calculate_tokens(text):
    """
    Count the tokens in `text` using tiktoken's "o200k_base" encoding,
    the tokenizer used by the gpt-4o and o1 model families.
    """
    encoding = tiktoken.get_encoding("o200k_base")

    # Calculate the number of tokens
    num_tokens = len(encoding.encode(text))

    return num_tokens
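

# Example usage: a minimal sketch tying the two helpers together. The prompt
# and completion strings are made up, and the dollar figure depends on the
# rates in LLM_COSTS staying current.
if __name__ == "__main__":
    prompt = "Summarize the plot of Moby-Dick in one sentence."
    completion = "A sea captain's obsessive hunt for a white whale ends in ruin."

    input_tokens = calculate_tokens(prompt)
    output_tokens = calculate_tokens(completion)

    cost = calculate_total_cost("gpt-4o-mini", input_tokens, output_tokens)
    print(f"Input tokens:  {input_tokens}")
    print(f"Output tokens: {output_tokens}")
    print(f"Estimated cost: ${cost:.6f}")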