Skip to content

Commit

Permalink
Merge pull request #63 from joaonadkarni/use-module-logger-instead-of-root-logger
Browse files Browse the repository at this point in the history

Use module logger instead of root logger
  • Loading branch information
areibman authored Jul 25, 2024
2 parents 6479a11 + f126dd6 commit 07e60b9
Show file tree
Hide file tree
Showing 3 changed files with 10 additions and 7 deletions.
2 changes: 1 addition & 1 deletion pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@ tokencost = ["model_prices.json"]

[project]
name = "tokencost"
version = "0.1.11"
version = "0.1.12"
authors = [
{ name = "Trisha Pan", email = "trishaepan@gmail.com" },
{ name = "Alex Reibman", email = "areibman@gmail.com" },
Expand Down
6 changes: 4 additions & 2 deletions tokencost/constants.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,8 @@
import asyncio
import logging

logger = logging.getLogger(__name__)

"""
Prompt (aka context) tokens are based on number of words + other chars (eg spaces and punctuation) in input.
Completion tokens are similarly based on how long chatGPT's response is.
Expand Down Expand Up @@ -46,7 +48,7 @@ async def update_token_costs():
try:
TOKEN_COSTS = await fetch_costs()
except Exception as e:
logging.error(f"Failed to update TOKEN_COSTS: {e}")
logger.error(f"Failed to update TOKEN_COSTS: {e}")
raise

with open(os.path.join(os.path.dirname(__file__), "model_prices.json"), "r") as f:
Expand All @@ -57,5 +59,5 @@ async def update_token_costs():
try:
asyncio.run(update_token_costs())
except Exception:
logging.error('Failed to update token costs. Using static costs.')
logger.error('Failed to update token costs. Using static costs.')
TOKEN_COSTS = TOKEN_COSTS_STATIC
9 changes: 5 additions & 4 deletions tokencost/costs.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,7 @@
from decimal import Decimal
import logging

logger = logging.getLogger(__name__)

# TODO: Add Claude support
# https://www-files.anthropic.com/production/images/model_pricing_july2023.pdf
Expand Down Expand Up @@ -41,7 +42,7 @@ def count_message_tokens(messages: List[Dict[str, str]], model: str) -> int:
try:
encoding = tiktoken.encoding_for_model(model)
except KeyError:
logging.warning("Model not found. Using cl100k_base encoding.")
logger.warning("Model not found. Using cl100k_base encoding.")
encoding = tiktoken.get_encoding("cl100k_base")
if model in {
"gpt-3.5-turbo-0613",
Expand All @@ -58,12 +59,12 @@ def count_message_tokens(messages: List[Dict[str, str]], model: str) -> int:
tokens_per_message = 4
tokens_per_name = -1 # if there's a name, the role is omitted
elif "gpt-3.5-turbo" in model:
logging.warning(
logger.warning(
"gpt-3.5-turbo may update over time. Returning num tokens assuming gpt-3.5-turbo-0613."
)
return count_message_tokens(messages, model="gpt-3.5-turbo-0613")
elif "gpt-4" in model:
logging.warning(
logger.warning(
"gpt-4 may update over time. Returning num tokens assuming gpt-4-0613."
)
return count_message_tokens(messages, model="gpt-4-0613")
Expand Down Expand Up @@ -98,7 +99,7 @@ def count_string_tokens(prompt: str, model: str) -> int:
try:
encoding = tiktoken.encoding_for_model(model)
except KeyError:
logging.warning("Warning: model not found. Using cl100k_base encoding.")
logger.warning("Warning: model not found. Using cl100k_base encoding.")
encoding = tiktoken.get_encoding("cl100k_base")

return len(encoding.encode(prompt))
Expand Down

0 comments on commit 07e60b9

Please sign in to comment.