janus.llm.model_callbacks#

Attributes#

Classes#

TokenUsageCallbackHandler

Callback Handler that tracks metadata on model cost, retries, etc.

Functions#

Module Contents#

janus.llm.model_callbacks.log#
janus.llm.model_callbacks.COST_PER_1K_TOKENS: dict[str, dict[str, float]]#
class janus.llm.model_callbacks.TokenUsageCallbackHandler#

Bases: langchain_core.callbacks.BaseCallbackHandler

Callback Handler that tracks metadata on model cost, retries, etc. Based on https://github.com/langchain-ai/langchain/blob/master/libs/community/langchain_community/callbacks/openai_info.py

total_tokens: int = 0#
prompt_tokens: int = 0#
completion_tokens: int = 0#
successful_requests: int = 0#
total_cost: float = 0.0#
property always_verbose: bool#

Whether to call verbose callbacks even if verbose is False.

Return type:

bool

on_chat_model_start(*args, **kwargs)#
on_llm_start(serialized, prompts, **kwargs)#

Print out the prompts.

Parameters:
  • serialized (dict[str, Any]) –

  • prompts (list[str]) –

  • kwargs (Any) –

Return type:

None

on_llm_new_token(token, **kwargs)#

Print out the token.

Parameters:
  • token (str) –

  • kwargs (Any) –

Return type:

None

on_llm_end(response, **kwargs)#

Collect token usage.

Parameters:
  • response (langchain_core.outputs.LLMResult) –

  • kwargs (Any) –

Return type:

None

janus.llm.model_callbacks.token_usage_callback_var: contextvars.ContextVar[TokenUsageCallbackHandler | None]#
janus.llm.model_callbacks.get_model_callback()#
Return type:

Generator[TokenUsageCallbackHandler, None, None]