API reference records for the fenic._inference package. Each entry lists: Type, Member Name, Qualified Name, File (with a line range where available), Visibility, Bases, Value, Parameters, Returns, Parent Class, and Docstring. Fields that are empty for a given element are omitted.
Type: module
Member Name: profile_manager
Qualified Name: fenic._inference.profile_manager
File: site-packages/fenic/_inference/profile_manager.py
Visibility: public
Docstring: Shared profile configuration management for model clients.

Type: attribute
Member Name: ProfileT
Qualified Name: fenic._inference.profile_manager.ProfileT
File: site-packages/fenic/_inference/profile_manager.py, line 7
Visibility: public
Value: TypeVar('ProfileT')

Type: attribute
Member Name: ConfigT
Qualified Name: fenic._inference.profile_manager.ConfigT
File: site-packages/fenic/_inference/profile_manager.py, line 8
Visibility: public
Value: TypeVar('ConfigT')

Type: class
Member Name: BaseProfileConfiguration
Qualified Name: fenic._inference.profile_manager.BaseProfileConfiguration
File: site-packages/fenic/_inference/profile_manager.py, lines 11-13
Visibility: public

Type: method
Member Name: __init__
Qualified Name: fenic._inference.profile_manager.BaseProfileConfiguration.__init__
File: site-packages/fenic/_inference/profile_manager.py
Visibility: public
Parameters: ["self"]
Returns: None
Parent Class: BaseProfileConfiguration

Type: class
Member Name: ProfileManager
Qualified Name: fenic._inference.profile_manager.ProfileManager
File: site-packages/fenic/_inference/profile_manager.py, lines 16-71
Visibility: public
Bases: Generic[ProfileT, ConfigT], ABC
Docstring: Abstract base class for managing profile configurations across providers.

Type: method
Member Name: __init__
Qualified Name: fenic._inference.profile_manager.ProfileManager.__init__
File: site-packages/fenic/_inference/profile_manager.py, lines 19-35
Visibility: public
Parameters: ["self", "profile_configurations", "default_profile_name"]
Parent Class: ProfileManager
Docstring: Initialize the profile configuration manager.
    Args:
        profile_configurations: Dictionary mapping profile names to configurations
        default_profile_name: Name of the default profile to use when none specified

Type: method
Member Name: _process_profile
Qualified Name: fenic._inference.profile_manager.ProfileManager._process_profile
File: site-packages/fenic/_inference/profile_manager.py, lines 37-47
Visibility: private
Parameters: ["self", "profile"]
Returns: ConfigT
Parent Class: ProfileManager
Docstring: Process a raw profile configuration into the provider-specific format.
    Args:
        profile: Raw profile configuration from session config
    Returns:
        Processed configuration object for this provider

Type: method
Member Name: get_default_profile
Qualified Name: fenic._inference.profile_manager.ProfileManager.get_default_profile
File: site-packages/fenic/_inference/profile_manager.py, lines 49-56
Visibility: public
Parameters: ["self"]
Returns: ConfigT
Parent Class: ProfileManager
Docstring: Get the default configuration when no profile is specified.
    Returns:
        Default configuration object

Type: method
Member Name: get_profile_by_name
Qualified Name: fenic._inference.profile_manager.ProfileManager.get_profile_by_name
File: site-packages/fenic/_inference/profile_manager.py, lines 58-71
Visibility: public
Parameters: ["self", "profile_name"]
Returns: ConfigT
Parent Class: ProfileManager
Docstring: Get the configuration for a given profile name.
    Args:
        profile_name: Name of the profile to get configuration for
    Returns:
        Configuration object for the profile
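The ProfileManager contract above (preprocess raw profiles in __init__ via the abstract _process_profile, then serve them through get_default_profile / get_profile_by_name) can be condensed into a short sketch. MiniProfileManager and UppercaseKeyManager are hypothetical names, and the behavior for unknown profile names is an assumption, not taken from the source.

```python
from abc import ABC, abstractmethod
from typing import Dict, Generic, Optional, TypeVar

ProfileT = TypeVar("ProfileT")  # raw profile type from the session config
ConfigT = TypeVar("ConfigT")    # processed, provider-specific config type

class MiniProfileManager(Generic[ProfileT, ConfigT], ABC):
    # Raw profiles are processed once at construction time, then served by name.
    def __init__(self,
                 profile_configurations: Optional[Dict[str, ProfileT]],
                 default_profile_name: Optional[str]) -> None:
        self._configs: Dict[str, ConfigT] = {
            name: self._process_profile(raw)
            for name, raw in (profile_configurations or {}).items()
        }
        self._default_name = default_profile_name

    @abstractmethod
    def _process_profile(self, profile: ProfileT) -> ConfigT:
        """Translate a raw profile into this provider's config format."""

    def get_default_profile(self) -> ConfigT:
        return self._configs[self._default_name]

    def get_profile_by_name(self, profile_name: str) -> ConfigT:
        # Direct lookup; the real class may handle missing names differently.
        return self._configs[profile_name]

class UppercaseKeyManager(MiniProfileManager[dict, dict]):
    def _process_profile(self, profile: dict) -> dict:
        return {key.upper(): value for key, value in profile.items()}

manager = UppercaseKeyManager({"fast": {"temperature": 0.2}}, "fast")
assert manager.get_default_profile() == {"TEMPERATURE": 0.2}
```

Processing profiles once at construction keeps per-request lookups cheap, which matters since each model client consults its profile configuration on every request.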
Type: module
Member Name: embedding_model
Qualified Name: fenic._inference.embedding_model
File: site-packages/fenic/_inference/embedding_model.py
Visibility: public

Type: class
Member Name: EmbeddingModel
Qualified Name: fenic._inference.embedding_model.EmbeddingModel
File: site-packages/fenic/_inference/embedding_model.py, lines 14-44
Visibility: public

Type: method
Member Name: __init__
Qualified Name: fenic._inference.embedding_model.EmbeddingModel.__init__
File: site-packages/fenic/_inference/embedding_model.py, lines 15-19
Visibility: public
Parameters: ["self", "client"]
Parent Class: EmbeddingModel

Type: method
Member Name: get_embeddings
Qualified Name: fenic._inference.embedding_model.EmbeddingModel.get_embeddings
File: site-packages/fenic/_inference/embedding_model.py, lines 21-37
Visibility: public
Parameters: ["self", "docs", "model_alias"]
Returns: pa.ListArray
Parent Class: EmbeddingModel

Type: method
Member Name: reset_metrics
Qualified Name: fenic._inference.embedding_model.EmbeddingModel.reset_metrics
File: site-packages/fenic/_inference/embedding_model.py, lines 40-41
Visibility: public
Parameters: ["self"]
Parent Class: EmbeddingModel

Type: method
Member Name: get_metrics
Qualified Name: fenic._inference.embedding_model.EmbeddingModel.get_metrics
File: site-packages/fenic/_inference/embedding_model.py, lines 43-44
Visibility: public
Parameters: ["self"]
Returns: RMMetrics
Parent Class: EmbeddingModel
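A toy stand-in illustrating the shape of this wrapper: a client is injected through the constructor, get_embeddings returns a pyarrow ListArray, and simple metrics accumulate between reset_metrics calls. MiniEmbeddingModel and fake_client are hypothetical; the real client interface and the RMMetrics fields are not shown in these records.

```python
from dataclasses import dataclass
from typing import Callable, List
import pyarrow as pa

@dataclass
class MiniEmbeddingModel:
    # Delegates to an injected client callable; a bare request counter
    # stands in for the real metrics object.
    client: Callable[[List[str]], List[List[float]]]
    requests_made: int = 0

    def get_embeddings(self, docs: List[str]) -> pa.ListArray:
        self.requests_made += 1
        return pa.array(self.client(docs), type=pa.list_(pa.float32()))

    def reset_metrics(self) -> None:
        self.requests_made = 0

def fake_client(docs: List[str]) -> List[List[float]]:
    return [[0.0, 1.0, 0.0] for _ in docs]

model = MiniEmbeddingModel(fake_client)
print(model.get_embeddings(["first doc", "second doc"]))
```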
Type: module
Member Name: request_utils
Qualified Name: fenic._inference.request_utils
File: site-packages/fenic/_inference/request_utils.py
Visibility: public
Docstring: Utilities for request processing and deduplication.

Type: function
Member Name: generate_completion_request_key
Qualified Name: fenic._inference.request_utils.generate_completion_request_key
File: site-packages/fenic/_inference/request_utils.py, lines 9-19
Visibility: public
Parameters: ["request"]
Returns: str
Docstring: Generate a standard SHA256-based key for completion request deduplication.
    Args:
        request: Completion request to generate key for
    Returns:
        10-character SHA256 hash of the messages
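A sketch of what a "standard SHA256-based key" with a 10-character digest prefix could look like. The canonicalization step (json.dumps with sorted keys) is an assumption; the record only states that the messages are hashed with SHA256 and truncated to 10 characters.

```python
import hashlib
import json

def completion_request_key(messages: list[dict[str, str]]) -> str:
    # Serialize the messages deterministically, hash, keep a short prefix.
    canonical = json.dumps(messages, sort_keys=True, separators=(",", ":"))
    return hashlib.sha256(canonical.encode("utf-8")).hexdigest()[:10]

key = completion_request_key([{"role": "user", "content": "hello"}])
assert len(key) == 10
```

A 10-character prefix (40 bits) is plenty for deduplicating requests within a single batch, while keeping keys cheap to store and compare.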
Type: module
Member Name: token_counter
Qualified Name: fenic._inference.token_counter
File: site-packages/fenic/_inference/token_counter.py
Visibility: public

Type: attribute
Member Name: Tokenizable
Qualified Name: fenic._inference.token_counter.Tokenizable
File: site-packages/fenic/_inference/token_counter.py, line 8
Visibility: public
Value: Union[list[dict[str, str]] | str | LMRequestMessages]

Type: class
Member Name: TokenCounter
Qualified Name: fenic._inference.token_counter.TokenCounter
File: site-packages/fenic/_inference/token_counter.py, lines 10-11
Visibility: public
Bases: Protocol

Type: method
Member Name: count_tokens
Qualified Name: fenic._inference.token_counter.TokenCounter.count_tokens
File: site-packages/fenic/_inference/token_counter.py, line 11
Visibility: public
Parameters: ["self", "messages"]
Returns: int
Parent Class: TokenCounter

Type: class
Member Name: TiktokenTokenCounter
Qualified Name: fenic._inference.token_counter.TiktokenTokenCounter
File: site-packages/fenic/_inference/token_counter.py, lines 13-43
Visibility: public

Type: method
Member Name: __init__
Qualified Name: fenic._inference.token_counter.TiktokenTokenCounter.__init__
File: site-packages/fenic/_inference/token_counter.py, lines 15-19
Visibility: public
Parameters: ["self", "model_name", "fallback_encoding"]
Parent Class: TiktokenTokenCounter

Type: method
Member Name: count_tokens
Qualified Name: fenic._inference.token_counter.TiktokenTokenCounter.count_tokens
File: site-packages/fenic/_inference/token_counter.py, lines 21-27
Visibility: public
Parameters: ["self", "messages"]
Returns: int
Parent Class: TiktokenTokenCounter

Type: method
Member Name: _count_request_tokens
Qualified Name: fenic._inference.token_counter.TiktokenTokenCounter._count_request_tokens
File: site-packages/fenic/_inference/token_counter.py, lines 29-30
Visibility: private
Parameters: ["self", "messages"]
Returns: int
Parent Class: TiktokenTokenCounter

Type: method
Member Name: _count_message_tokens
Qualified Name: fenic._inference.token_counter.TiktokenTokenCounter._count_message_tokens
File: site-packages/fenic/_inference/token_counter.py, lines 32-43
Visibility: private
Parameters: ["self", "messages"]
Returns: int
Parent Class: TiktokenTokenCounter
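The TiktokenTokenCounter constructor takes a model_name and a fallback_encoding, which suggests the usual tiktoken pattern: resolve the encoding for the model, and fall back to a named encoding when the model is unknown. A minimal sketch under that assumption (MiniTiktokenCounter is a hypothetical name):

```python
import tiktoken

class MiniTiktokenCounter:
    def __init__(self, model_name: str, fallback_encoding: str = "cl100k_base"):
        try:
            self._enc = tiktoken.encoding_for_model(model_name)
        except KeyError:
            # Unknown model name: fall back to the named encoding.
            self._enc = tiktoken.get_encoding(fallback_encoding)

    def count_tokens(self, messages) -> int:
        if isinstance(messages, str):
            return len(self._enc.encode(messages))
        # Assume a list of {"role": ..., "content": ...} dicts; per-message
        # structural overhead is ignored in this sketch.
        return sum(len(self._enc.encode(m["content"])) for m in messages)

counter = MiniTiktokenCounter("gpt-4o-mini")
print(counter.count_tokens("hello world"))
print(counter.count_tokens([{"role": "user", "content": "hello"}]))
```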
Type: module
Member Name: model_client
Qualified Name: fenic._inference.model_client
File: site-packages/fenic/_inference/model_client.py
Visibility: public

Type: attribute
Member Name: RequestT
Qualified Name: fenic._inference.model_client.RequestT
File: site-packages/fenic/_inference/model_client.py, line 36
Visibility: public
Value: TypeVar('RequestT')

Type: attribute
Member Name: ResponseT
Qualified Name: fenic._inference.model_client.ResponseT
File: site-packages/fenic/_inference/model_client.py, line 37
Visibility: public
Value: TypeVar('ResponseT')

Type: attribute
Member Name: logger
Qualified Name: fenic._inference.model_client.logger
File: site-packages/fenic/_inference/model_client.py, line 39
Visibility: public
Value: logging.getLogger(__name__)

Type: class
Member Name: ResponseUsage
Qualified Name: fenic._inference.model_client.ResponseUsage
File: site-packages/fenic/_inference/model_client.py, lines 41-48
Visibility: public
Docstring: Token usage information from API response.

Type: method
Member Name: __init__
Qualified Name: fenic._inference.model_client.ResponseUsage.__init__
File: site-packages/fenic/_inference/model_client.py
Visibility: public
Parameters: ["self", "prompt_tokens", "completion_tokens", "total_tokens", "cached_tokens", "thinking_tokens"]
Returns: None
Parent Class: ResponseUsage

Type: class
Member Name: TransientException
Qualified Name: fenic._inference.model_client.TransientException
File: site-packages/fenic/_inference/model_client.py, lines 53-57
Visibility: public
Docstring: Represents an exception that might be resolved with a retry.

Type: method
Member Name: __init__
Qualified Name: fenic._inference.model_client.TransientException.__init__
File: site-packages/fenic/_inference/model_client.py
Visibility: public
Parameters: ["self", "exception"]
Returns: None
Parent Class: TransientException

Type: class
Member Name: FatalException
Qualified Name: fenic._inference.model_client.FatalException
File: site-packages/fenic/_inference/model_client.py, lines 60-64
Visibility: public
Docstring: Represents an exception that is unlikely to be resolved with retries.

Type: method
Member Name: __init__
Qualified Name: fenic._inference.model_client.FatalException.__init__
File: site-packages/fenic/_inference/model_client.py
Visibility: public
Parameters: ["self", "exception"]
Returns: None
Parent Class: FatalException

Type: class
Member Name: QueueItem
Qualified Name: fenic._inference.model_client.QueueItem
File: site-packages/fenic/_inference/model_client.py, lines 66-74
Visibility: public
Bases: Generic[RequestT]
Docstring: Represents an item in the request queue.

Type: method
Member Name: __init__
Qualified Name: fenic._inference.model_client.QueueItem.__init__
File: site-packages/fenic/_inference/model_client.py
Visibility: public
Parameters: ["self", "thread_id", "request", "future", "estimated_tokens", "batch_id"]
Returns: None
Parent Class: QueueItem
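TransientException and FatalException are plain wrappers that let make_single_request return errors as values, so the queue worker can decide between retrying with backoff and failing fast. A self-contained sketch of that dispatch (resolve is a hypothetical helper, not part of the source API):

```python
class TransientException:
    # Wraps an error a retry might fix (rate limits, timeouts, 5xx).
    def __init__(self, exception: Exception) -> None:
        self.exception = exception

class FatalException:
    # Wraps an error retries will not fix (bad request, auth failure).
    def __init__(self, exception: Exception) -> None:
        self.exception = exception

def resolve(result):
    # Sketch of the branching a response handler performs on these wrappers.
    if isinstance(result, TransientException):
        return "requeue with backoff"
    if isinstance(result, FatalException):
        raise result.exception
    return result  # a real response (or None for an empty request)

assert resolve("ok") == "ok"
assert resolve(TransientException(TimeoutError())) == "requeue with backoff"
```

Returning errors as values instead of raising keeps the event loop alive: only the originating thread ever sees a fatal error raised.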
Type: class
Member Name: ModelClient
Qualified Name: fenic._inference.model_client.ModelClient
File: site-packages/fenic/_inference/model_client.py, lines 77-701
Visibility: public
Bases: Generic[RequestT, ResponseT], ABC
Docstring: Base client for interacting with language and embedding models.
    This abstract base class provides a robust framework for interacting with language models,
    handling rate limiting, request queuing, retries, and deduplication. It manages concurrent
    requests efficiently using an asynchronous event loop and implements token-based rate limiting.
    Type Parameters:
        RequestT: The type of request objects this client handles
        ResponseT: The type of response objects this client returns
    Attributes:
        model (str): The name or identifier of the model
        model_provider (ModelProvider): The provider of the model (e.g., OPENAI, ANTHROPIC)
        model_provider_class (ModelProviderClass): A class that implements common provider logic
        rate_limit_strategy (RateLimitStrategy): Strategy for rate limiting requests
        token_counter (TiktokenTokenCounter): Counter for estimating token usage

Type: method
Member Name: __init__
Qualified Name: fenic._inference.model_client.ModelClient.__init__
File: site-packages/fenic/_inference/model_client.py, lines 96-155
Visibility: public
Parameters: ["self", "model", "model_provider", "model_provider_class", "rate_limit_strategy", "token_counter", "queue_size", "initial_backoff_seconds", "backoff_factor", "max_backoffs"]
Parent Class: ModelClient
Docstring: Initialize the ModelClient with configuration for model interaction.
    Args:
        model: The name or identifier of the model
        model_provider: The model provider (OPENAI, ANTHROPIC)
        model_provider_class: The model provider class (OpenAIModelProvider, AnthropicModelProvider, etc.)
        alias: The Model Client's alias, for logging purposes
        rate_limit_strategy: Strategy for rate limiting requests
        token_counter: Implementation for predicting input token counts
        queue_size: Maximum size of the request queue (default: 100)
        initial_backoff_seconds: Initial delay for exponential backoff (default: 1)
        backoff_factor: Factor by which backoff time increases (default: 2)
        max_backoffs: Maximum number of retry attempts (default: 10)

Type: method
Member Name: make_single_request
Qualified Name: fenic._inference.model_client.ModelClient.make_single_request
File: site-packages/fenic/_inference/model_client.py, lines 157-174
Visibility: public
Parameters: ["self", "request"]
Returns: Union[None, ResponseT, TransientException, FatalException]
Parent Class: ModelClient
Docstring: Make a single API call to the language model.
    This method must be implemented by subclasses to handle the actual API communication
    with the language model provider.
    Args:
        request: The request data to send to the model
    Returns:
        Union[None, ResponseT, TransientException, FatalException]: The API response,
        None if the request was empty, or an exception wrapper indicating either a
        transient error (can be retried) or a fatal error (should not be retried)

Type: method
Member Name: estimate_tokens_for_request
Qualified Name: fenic._inference.model_client.ModelClient.estimate_tokens_for_request
File: site-packages/fenic/_inference/model_client.py, lines 176-189
Visibility: public
Parameters: ["self", "request"]
Returns: TokenEstimate
Parent Class: ModelClient
Docstring: Estimate the token usage for a given request.
    This method must be implemented by subclasses to accurately predict token usage
    for both input and output tokens.
    Args:
        request: The request to estimate tokens for
    Returns:
        TokenEstimate: Object containing estimated input and output tokens

Type: method
Member Name: count_tokens
Qualified Name: fenic._inference.model_client.ModelClient.count_tokens
File: site-packages/fenic/_inference/model_client.py, lines 191-200
Visibility: public
Parameters: ["self", "messages"]
Returns: int
Parent Class: ModelClient
Docstring: Count the number of tokens in a tokenizable object.
    Args:
        messages: The tokenizable object to count tokens for
    Returns:
        int: The number of tokens in the object

Type: method
Member Name: get_request_key
Qualified Name: fenic._inference.model_client.ModelClient.get_request_key
File: site-packages/fenic/_inference/model_client.py, lines 202-215
Visibility: public
Parameters: ["self", "request"]
Returns: Any
Parent Class: ModelClient
Docstring: Generate a unique key for request deduplication.
    This method must be implemented by subclasses to provide a hashable key that
    uniquely identifies a request for deduplication purposes.
    Args:
        request: The request to generate a key for
    Returns:
        Any: A hashable value that uniquely identifies this request

Type: method
Member Name: get_metrics
Qualified Name: fenic._inference.model_client.ModelClient.get_metrics
File: site-packages/fenic/_inference/model_client.py, lines 217-224
Visibility: public
Parameters: ["self"]
Returns: LMMetrics
Parent Class: ModelClient
Docstring: Get the current metrics for this model client.
    Returns:
        LMMetrics: The current metrics for this client

Type: method
Member Name: reset_metrics
Qualified Name: fenic._inference.model_client.ModelClient.reset_metrics
File: site-packages/fenic/_inference/model_client.py, lines 226-229
Visibility: public
Parameters: ["self"]
Parent Class: ModelClient
Docstring: Reset all metrics for this model client to their initial values.
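Taken together, make_single_request, estimate_tokens_for_request, and get_request_key form the subclass contract of ModelClient. A compressed sketch of that contract, omitting the queueing, retry, and rate-limiting machinery of the real base class (MiniModelClient and EchoClient are hypothetical names, and the 4-chars-per-token heuristic is an illustrative assumption):

```python
import hashlib
from abc import ABC, abstractmethod
from typing import Generic, TypeVar

RequestT = TypeVar("RequestT")
ResponseT = TypeVar("ResponseT")

class MiniModelClient(Generic[RequestT, ResponseT], ABC):
    @abstractmethod
    def make_single_request(self, request: RequestT):
        """Call the provider API; return a response or an error wrapper."""

    @abstractmethod
    def estimate_tokens_for_request(self, request: RequestT) -> int:
        """Predict token usage so rate limiting can budget ahead of time."""

    @abstractmethod
    def get_request_key(self, request: RequestT):
        """Return a hashable key used to deduplicate identical requests."""

class EchoClient(MiniModelClient[str, str]):
    def make_single_request(self, request: str) -> str:
        return request.upper()  # stands in for a real provider call

    def estimate_tokens_for_request(self, request: str) -> int:
        return max(1, len(request) // 4)  # crude chars-per-token heuristic

    def get_request_key(self, request: str) -> str:
        return hashlib.sha256(request.encode()).hexdigest()[:10]

client = EchoClient()
print(client.make_single_request("hello"), client.get_request_key("hello"))
```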
Type: method
Member Name: _count_auxiliary_input_tokens
Qualified Name: fenic._inference.model_client.ModelClient._count_auxiliary_input_tokens
File: site-packages/fenic/_inference/model_client.py, lines 231-235
Visibility: private
Parameters: ["self", "request"]
Returns: int
Parent Class: ModelClient
Docstring: Count extra input tokens for structured output, tools, etc. Override as needed.

Type: method
Member Name: _estimate_structured_output_overhead
Qualified Name: fenic._inference.model_client.ModelClient._estimate_structured_output_overhead
File: site-packages/fenic/_inference/model_client.py, lines 237-241
Visibility: private
Parameters: ["self", "response_format"]
Returns: int
Parent Class: ModelClient
Docstring: Default structured output token estimation. Override for provider-specific logic.

Type: method
Member Name: _get_max_output_tokens
Qualified Name: fenic._inference.model_client.ModelClient._get_max_output_tokens
File: site-packages/fenic/_inference/model_client.py, lines 244-247
Visibility: private
Parameters: ["self", "request"]
Returns: int
Parent Class: ModelClient
Docstring: Get conservative output token estimate. Override in subclasses for provider-specific logic.

Type: method
Member Name: shutdown
Qualified Name: fenic._inference.model_client.ModelClient.shutdown
File: site-packages/fenic/_inference/model_client.py, lines 252-289
Visibility: public
Parameters: ["self"]
Parent Class: ModelClient
Docstring: Shut down the model client and clean up resources.
    This method:
    1. Cancels all pending and in-flight requests
    2. Unregisters the client from the ModelClientManager
    3. Cleans up all associated resources
    4. Ensures all threads are properly notified of the shutdown

Type: method
Member Name: make_batch_requests
Qualified Name: fenic._inference.model_client.ModelClient.make_batch_requests
File: site-packages/fenic/_inference/model_client.py, lines 291-313
Visibility: public
Parameters: ["self", "requests", "operation_name"]
Returns: List[ResponseT]
Parent Class: ModelClient
Docstring: Submit and process a batch of requests asynchronously.
    This method handles the submission and processing of multiple requests in parallel,
    with automatic deduplication and rate limiting. It provides progress tracking
    and handles empty requests appropriately.
    Args:
        requests: List of requests to process. None entries are handled as empty responses
        operation_name: Name for logging purposes to identify the operation
    Returns:
        List[ResponseT]: List of responses in the same order as the input requests

Type: method
Member Name: _get_or_create_request_future
Qualified Name: fenic._inference.model_client.ModelClient._get_or_create_request_future
File: site-packages/fenic/_inference/model_client.py, lines 318-363
Visibility: private
Parameters: ["self", "unique_futures", "request"]
Returns: tuple[Future, TokenEstimate | None]
Parent Class: ModelClient
Docstring: Retrieves an existing future for a duplicate request or creates a new one.
    Args:
        unique_futures: A dictionary mapping request keys to their futures.
        request: The current request being processed.
    Returns:
        A tuple of the future for the request and the estimated number of tokens (0 for duplicates).

Type: method
Member Name: _maybe_raise_thread_exception
Qualified Name: fenic._inference.model_client.ModelClient._maybe_raise_thread_exception
File: site-packages/fenic/_inference/model_client.py, lines 365-370
Visibility: private
Parameters: ["self"]
Parent Class: ModelClient
Docstring: Surface exceptions from event loop to calling thread immediately.

Type: method
Member Name: _calculate_backoff_time
Qualified Name: fenic._inference.model_client.ModelClient._calculate_backoff_time
File: site-packages/fenic/_inference/model_client.py, lines 372-384
Visibility: private
Parameters: ["self", "backoff_iteration"]
Returns: float
Parent Class: ModelClient
Docstring: Calculates the backoff duration using exponential backoff with a maximum limit.
    Args:
        backoff_iteration: The current backoff iteration.
    Returns:
        The backoff time in seconds.
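_calculate_backoff_time documents exponential backoff with a maximum limit, driven by the initial_backoff_seconds (default 1) and backoff_factor (default 2) constructor arguments, with max_backoffs (default 10) capping the retry count. A minimal sketch; the 60-second ceiling and the jitter term are assumptions, not values from the source:

```python
import random

def backoff_seconds(iteration: int,
                    initial: float = 1.0,
                    factor: float = 2.0,
                    cap: float = 60.0) -> float:
    # Exponential growth, clamped to a ceiling, with jitter so that
    # concurrent clients do not retry in lockstep.
    base = min(cap, initial * (factor ** iteration))
    return base * random.uniform(0.5, 1.0)

for i in range(4):
    print(f"retry {i}: sleep ~{backoff_seconds(i):.2f}s")
```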
Type: method
Member Name: _check_and_consume_rate_limit
Qualified Name: fenic._inference.model_client.ModelClient._check_and_consume_rate_limit
File: site-packages/fenic/_inference/model_client.py, lines 386-396
Visibility: private
Parameters: ["self", "token_amount"]
Returns: bool
Parent Class: ModelClient
Docstring: Checks if there is enough capacity in both the token and request rate limit buckets,
    and consumes the capacity if so.
    Args:
        token_amount: A TokenEstimate object containing the estimated input, output, and total tokens.
    Returns:
        True if there was enough capacity and it was consumed, False otherwise.

Type: method
Member Name: _enqueue_request
Qualified Name: fenic._inference.model_client.ModelClient._enqueue_request
File: site-packages/fenic/_inference/model_client.py, lines 398-404
Visibility: private
Parameters: ["self", "queue_item"]
Parent Class: ModelClient
Docstring: Enqueue a request to be processed.
    Args:
        queue_item: The queue item to enqueue.

Type: method
Member Name: _make_batch_requests
Qualified Name: fenic._inference.model_client.ModelClient._make_batch_requests
File: site-packages/fenic/_inference/model_client.py, lines 407-435
Visibility: private
Parameters: ["self", "requests", "operation_name", "batch_id"]
Returns: List[ResponseT]
Parent Class: ModelClient
Docstring: Standard batch processing without sampling (used by both sampling and non-sampling flows).

Type: method
Member Name: _submit_batch_requests
Qualified Name: fenic._inference.model_client.ModelClient._submit_batch_requests
File: site-packages/fenic/_inference/model_client.py, lines 437-505
Visibility: private
Parameters: ["self", "requests", "batch_id"]
Returns: tuple[List[Future], int, TokenEstimate]
Parent Class: ModelClient
Docstring: Submit all requests in a batch and return futures, unique request count, and token estimate.
    Args:
        requests: List of requests to submit
        batch_id: Batch identifier for tracking
    Returns:
        Tuple of (request_futures, num_unique_requests, total_token_estimate)

Type: method
Member Name: _collect_batch_responses
Qualified Name: fenic._inference.model_client.ModelClient._collect_batch_responses
File: site-packages/fenic/_inference/model_client.py, lines 507-527
Visibility: private
Parameters: ["self", "request_futures", "batch_id"]
Returns: List[ResponseT]
Parent Class: ModelClient
Docstring: Collect responses from all request futures with progress tracking.
    Args:
        request_futures: List of futures to wait for
        batch_id: Batch identifier for logging
    Returns:
        List of responses in same order as input futures

Type: method
Member Name: _process_queue
Qualified Name: fenic._inference.model_client.ModelClient._process_queue
File: site-packages/fenic/_inference/model_client.py, lines 532-561
Visibility: private
Parameters: ["self"]
Parent Class: ModelClient
Docstring: Continuously processes requests from the request and retry queues. This method runs on the shared asyncio event loop.

Type: method
Member Name: _process_single_request
Qualified Name: fenic._inference.model_client.ModelClient._process_single_request
File: site-packages/fenic/_inference/model_client.py, lines 564-588
Visibility: private
Parameters: ["self", "queue_item"]
Parent Class: ModelClient
Docstring: Process a single request from the queues.
    Args:
        queue_item: The queue item to process.

Type: method
Member Name: _handle_response
Qualified Name: fenic._inference.model_client.ModelClient._handle_response
File: site-packages/fenic/_inference/model_client.py, lines 590-621
Visibility: private
Parameters: ["self", "queue_item", "maybe_response"]
Parent Class: ModelClient
Docstring: Handle the response from a request, including retrying if necessary.
    Args:
        queue_item: The queue item associated with the request.
        maybe_response: The response or exception from the request.

Type: method
Member Name: _maybe_backoff
Qualified Name: fenic._inference.model_client.ModelClient._maybe_backoff
File: site-packages/fenic/_inference/model_client.py, lines 623-639
Visibility: private
Parameters: ["self"]
Parent Class: ModelClient
Docstring: Manages the backoff period after encountering a transient exception.

Type: method
Member Name: _get_queued_requests
Qualified Name: fenic._inference.model_client.ModelClient._get_queued_requests
File: site-packages/fenic/_inference/model_client.py, lines 642-671
Visibility: private
Parameters: ["self"]
Returns: List[QueueItem[RequestT]]
Parent Class: ModelClient
Docstring: Asynchronously retrieves items from the retry queue or the request queue,
    prioritizing the retry queue. Returns None if a shutdown is signaled.
    Returns:
        A list of queue items, or None if a shutdown is signaled.

Type: method
Member Name: _track_inflight_task
Qualified Name: fenic._inference.model_client.ModelClient._track_inflight_task
File: site-packages/fenic/_inference/model_client.py, lines 673-680
Visibility: private
Parameters: ["self", "task"]
Parent Class: ModelClient
Docstring: Adds a task to the set of inflight requests and removes it upon completion.
    Args:
        task: The task to track.

Type: method
Member Name: _register_thread_exception
Qualified Name: fenic._inference.model_client.ModelClient._register_thread_exception
File: site-packages/fenic/_inference/model_client.py, lines 682-695
Visibility: private
Parameters: ["self", "queue_item", "exception"]
Parent Class: ModelClient
Docstring: Registers an exception that occurred on the event loop to be raised in the originating thread.
    Args:
        queue_item: The queue item associated with the exception.
        exception: The exception that occurred.

Type: method
Member Name: _cancel_in_flight_requests
Qualified Name: fenic._inference.model_client.ModelClient._cancel_in_flight_requests
File: site-packages/fenic/_inference/model_client.py, lines 697-701
Visibility: private
Parameters: ["self"]
Parent Class: ModelClient
Docstring: Cancels all inflight tasks and gathers their results.
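_check_and_consume_rate_limit atomically checks and debits both the request-count and token budgets before a queue item is dispatched. A minimal token-bucket sketch of that check-and-consume step; TokenBucket is a hypothetical class and the capacity/refill numbers are illustrative:

```python
import time

class TokenBucket:
    def __init__(self, capacity: float, refill_per_second: float) -> None:
        self.capacity = capacity
        self.refill = refill_per_second
        self.level = capacity
        self.last = time.monotonic()

    def try_consume(self, amount: float) -> bool:
        # Refill based on elapsed time, then consume only if the full
        # amount fits; partial consumption would break the budget.
        now = time.monotonic()
        self.level = min(self.capacity, self.level + (now - self.last) * self.refill)
        self.last = now
        if self.level >= amount:
            self.level -= amount
            return True
        return False

tokens_per_minute = TokenBucket(capacity=90_000, refill_per_second=1_500)
if tokens_per_minute.try_consume(1_200):
    print("dispatch the request")
else:
    print("leave it queued and try again later")
```

Checking both buckets before consuming either is what makes the operation safe: a request is dispatched only when every budget can afford it.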
module
|
types
|
fenic._inference.types
| null |
site-packages/fenic/_inference/types.py
| true | false | null | null | null | null | null | null | null | null |
Type: module
Member Name: types
Qualified Name: fenic._inference.types
Docstring: none
Value: none
Annotation: none
is Public? : true
is Private? : false
Parameters: none
Returns: none
Parent Class: none
|
class
|
FewShotExample
|
fenic._inference.types.FewShotExample
| null |
site-packages/fenic/_inference/types.py
| true | false | 9 | 12 | null | null | null | null | null |
[] |
Type: class
Member Name: FewShotExample
Qualified Name: fenic._inference.types.FewShotExample
Docstring: none
Value: none
Annotation: none
is Public? : true
is Private? : false
Parameters: none
Returns: none
Parent Class: none
|
method
|
__init__
|
fenic._inference.types.FewShotExample.__init__
| null |
site-packages/fenic/_inference/types.py
| true | false | 0 | 0 | null |
None
|
[
"self",
"user",
"assistant"
] |
FewShotExample
| null | null |
Type: method
Member Name: __init__
Qualified Name: fenic._inference.types.FewShotExample.__init__
Docstring: none
Value: none
Annotation: none
is Public? : true
is Private? : false
Parameters: ["self", "user", "assistant"]
Returns: None
Parent Class: FewShotExample
|
class
|
LMRequestMessages
|
fenic._inference.types.LMRequestMessages
| null |
site-packages/fenic/_inference/types.py
| true | false | 14 | 28 | null | null | null | null | null |
[] |
Type: class
Member Name: LMRequestMessages
Qualified Name: fenic._inference.types.LMRequestMessages
Docstring: none
Value: none
Annotation: none
is Public? : true
is Private? : false
Parameters: none
Returns: none
Parent Class: none
|
method
|
to_message_list
|
fenic._inference.types.LMRequestMessages.to_message_list
| null |
site-packages/fenic/_inference/types.py
| true | false | 20 | 28 | null |
List[Dict[str, str]]
|
[
"self"
] |
LMRequestMessages
| null | null |
Type: method
Member Name: to_message_list
Qualified Name: fenic._inference.types.LMRequestMessages.to_message_list
Docstring: none
Value: none
Annotation: none
is Public? : true
is Private? : false
Parameters: ["self"]
Returns: List[Dict[str, str]]
Parent Class: LMRequestMessages
|
method
|
__init__
|
fenic._inference.types.LMRequestMessages.__init__
| null |
site-packages/fenic/_inference/types.py
| true | false | 0 | 0 | null |
None
|
[
"self",
"system",
"examples",
"user"
] |
LMRequestMessages
| null | null |
Type: method
Member Name: __init__
Qualified Name: fenic._inference.types.LMRequestMessages.__init__
Docstring: none
Value: none
Annotation: none
is Public? : true
is Private? : false
Parameters: ["self", "system", "examples", "user"]
Returns: None
Parent Class: LMRequestMessages
|
class
|
ResponseUsage
|
fenic._inference.types.ResponseUsage
|
Token usage information from API response.
|
site-packages/fenic/_inference/types.py
| true | false | 30 | 37 | null | null | null | null | null |
[] |
Type: class
Member Name: ResponseUsage
Qualified Name: fenic._inference.types.ResponseUsage
Docstring: Token usage information from API response.
Value: none
Annotation: none
is Public? : true
is Private? : false
Parameters: none
Returns: none
Parent Class: none
|
method
|
__init__
|
fenic._inference.types.ResponseUsage.__init__
| null |
site-packages/fenic/_inference/types.py
| true | false | 0 | 0 | null |
None
|
[
"self",
"prompt_tokens",
"completion_tokens",
"total_tokens",
"cached_tokens",
"thinking_tokens"
] |
ResponseUsage
| null | null |
Type: method
Member Name: __init__
Qualified Name: fenic._inference.types.ResponseUsage.__init__
Docstring: none
Value: none
Annotation: none
is Public? : true
is Private? : false
Parameters: ["self", "prompt_tokens", "completion_tokens", "total_tokens", "cached_tokens", "thinking_tokens"]
Returns: None
Parent Class: ResponseUsage
|
class
|
FenicCompletionsResponse
|
fenic._inference.types.FenicCompletionsResponse
| null |
site-packages/fenic/_inference/types.py
| true | false | 39 | 43 | null | null | null | null | null |
[] |
Type: class
Member Name: FenicCompletionsResponse
Qualified Name: fenic._inference.types.FenicCompletionsResponse
Docstring: none
Value: none
Annotation: none
is Public? : true
is Private? : false
Parameters: none
Returns: none
Parent Class: none
|
method
|
__init__
|
fenic._inference.types.FenicCompletionsResponse.__init__
| null |
site-packages/fenic/_inference/types.py
| true | false | 0 | 0 | null |
None
|
[
"self",
"completion",
"logprobs",
"usage"
] |
FenicCompletionsResponse
| null | null |
Type: method
Member Name: __init__
Qualified Name: fenic._inference.types.FenicCompletionsResponse.__init__
Docstring: none
Value: none
Annotation: none
is Public? : true
is Private? : false
Parameters: ["self", "completion", "logprobs", "usage"]
Returns: None
Parent Class: FenicCompletionsResponse
|
class
|
FenicCompletionsRequest
|
fenic._inference.types.FenicCompletionsRequest
| null |
site-packages/fenic/_inference/types.py
| true | false | 46 | 53 | null | null | null | null | null |
[] |
Type: class
Member Name: FenicCompletionsRequest
Qualified Name: fenic._inference.types.FenicCompletionsRequest
Docstring: none
Value: none
Annotation: none
is Public? : true
is Private? : false
Parameters: none
Returns: none
Parent Class: none
|
method
|
__init__
|
fenic._inference.types.FenicCompletionsRequest.__init__
| null |
site-packages/fenic/_inference/types.py
| true | false | 0 | 0 | null |
None
|
[
"self",
"messages",
"max_completion_tokens",
"top_logprobs",
"structured_output",
"temperature",
"model_profile"
] |
FenicCompletionsRequest
| null | null |
Type: method
Member Name: __init__
Qualified Name: fenic._inference.types.FenicCompletionsRequest.__init__
Docstring: none
Value: none
Annotation: none
is Public? : true
is Private? : false
Parameters: ["self", "messages", "max_completion_tokens", "top_logprobs", "structured_output", "temperature", "model_profile"]
Returns: None
Parent Class: FenicCompletionsRequest
|
class | FenicEmbeddingsRequest | fenic._inference.types.FenicEmbeddingsRequest | null | site-packages/fenic/_inference/types.py | true | false | 55 | 58 | null | null | null | null | null | [] |
Type: class
Member Name: FenicEmbeddingsRequest
Qualified Name: fenic._inference.types.FenicEmbeddingsRequest
Docstring: none
Value: none
Annotation: none
is Public? : true
is Private? : false
Parameters: none
Returns: none
Parent Class: none
|
method | __init__ | fenic._inference.types.FenicEmbeddingsRequest.__init__ | null | site-packages/fenic/_inference/types.py | true | false | 0 | 0 | null | None | ["self", "doc", "model_profile"] | FenicEmbeddingsRequest | null | null |
Type: method
Member Name: __init__
Qualified Name: fenic._inference.types.FenicEmbeddingsRequest.__init__
Docstring: none
Value: none
Annotation: none
is Public? : true
is Private? : false
Parameters: ["self", "doc", "model_profile"]
Returns: None
Parent Class: FenicEmbeddingsRequest
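By contrast, the embeddings request is minimal: one document plus an optional profile. A sketch, with both types assumed:

```python
from dataclasses import dataclass
from typing import Optional


@dataclass
class FenicEmbeddingsRequest:
    doc: str                      # the text to embed (type assumed)
    model_profile: Optional[str]  # profile name, as in the completions request
```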
|
module | rate_limit_strategy | fenic._inference.rate_limit_strategy | null | site-packages/fenic/_inference/rate_limit_strategy.py | true | false | null | null | null | null | null | null | null | null |
Type: module
Member Name: rate_limit_strategy
Qualified Name: fenic._inference.rate_limit_strategy
Docstring: none
Value: none
Annotation: none
is Public? : true
is Private? : false
Parameters: none
Returns: none
Parent Class: none
|
class | TokenEstimate | fenic._inference.rate_limit_strategy.TokenEstimate | null | site-packages/fenic/_inference/rate_limit_strategy.py | true | false | 10 | 26 | null | null | null | null | null | [] |
Type: class
Member Name: TokenEstimate
Qualified Name: fenic._inference.rate_limit_strategy.TokenEstimate
Docstring: none
Value: none
Annotation: none
is Public? : true
is Private? : false
Parameters: none
Returns: none
Parent Class: none
|
method | __post_init__ | fenic._inference.rate_limit_strategy.TokenEstimate.__post_init__ | null | site-packages/fenic/_inference/rate_limit_strategy.py | true | false | 16 | 17 | null | null | ["self"] | TokenEstimate | null | null |
Type: method
Member Name: __post_init__
Qualified Name: fenic._inference.rate_limit_strategy.TokenEstimate.__post_init__
Docstring: none
Value: none
Annotation: none
is Public? : true
is Private? : false
Parameters: ["self"]
Returns: none
Parent Class: TokenEstimate
|
method | __str__ | fenic._inference.rate_limit_strategy.TokenEstimate.__str__ | null | site-packages/fenic/_inference/rate_limit_strategy.py | true | false | 19 | 20 | null | null | ["self"] | TokenEstimate | null | null |
Type: method
Member Name: __str__
Qualified Name: fenic._inference.rate_limit_strategy.TokenEstimate.__str__
Docstring: none
Value: none
Annotation: none
is Public? : true
is Private? : false
Parameters: ["self"]
Returns: none
Parent Class: TokenEstimate
|
method | __add__ | fenic._inference.rate_limit_strategy.TokenEstimate.__add__ | null | site-packages/fenic/_inference/rate_limit_strategy.py | true | false | 22 | 26 | null | null | ["self", "other"] | TokenEstimate | null | null |
Type: method
Member Name: __add__
Qualified Name: fenic._inference.rate_limit_strategy.TokenEstimate.__add__
Docstring: none
Value: none
Annotation: none
is Public? : true
is Private? : false
Parameters: ["self", "other"]
Returns: none
Parent Class: TokenEstimate
|
method | __init__ | fenic._inference.rate_limit_strategy.TokenEstimate.__init__ | null | site-packages/fenic/_inference/rate_limit_strategy.py | true | false | 0 | 0 | null | None | ["self", "input_tokens", "output_tokens"] | TokenEstimate | null | null |
Type: method
Member Name: __init__
Qualified Name: fenic._inference.rate_limit_strategy.TokenEstimate.__init__
Docstring: none
Value: none
Annotation: none
is Public? : true
is Private? : false
Parameters: ["self", "input_tokens", "output_tokens"]
Returns: None
Parent Class: TokenEstimate
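The TokenEstimate records above show an __init__ taking input_tokens and output_tokens, plus __post_init__, __str__, and __add__; the check_and_consume_rate_limit docstring further down also mentions a total. That surface suggests a value type that derives its total on construction and combines by summing fields. A sketch consistent with those records, where the derivation in __post_init__ and the exact __str__ format are assumptions:

```python
from dataclasses import dataclass, field


@dataclass
class TokenEstimate:
    input_tokens: int = 0
    output_tokens: int = 0
    total_tokens: int = field(init=False, default=0)

    def __post_init__(self):
        # Assumed: the total is derived from the two declared fields,
        # since __init__ does not accept it directly.
        self.total_tokens = self.input_tokens + self.output_tokens

    def __add__(self, other: "TokenEstimate") -> "TokenEstimate":
        return TokenEstimate(
            input_tokens=self.input_tokens + other.input_tokens,
            output_tokens=self.output_tokens + other.output_tokens,
        )

    def __str__(self) -> str:
        return (
            f"TokenEstimate(input={self.input_tokens}, "
            f"output={self.output_tokens}, total={self.total_tokens})"
        )
```

With __add__ defined this way, per-request estimates could be rolled up across a batch via `sum(estimates, TokenEstimate())`.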
|
class | RateLimitBucket | fenic._inference.rate_limit_strategy.RateLimitBucket | Manages a token bucket for rate limiting. | site-packages/fenic/_inference/rate_limit_strategy.py | true | false | 29 | 50 | null | null | null | null | null | [] |
Type: class
Member Name: RateLimitBucket
Qualified Name: fenic._inference.rate_limit_strategy.RateLimitBucket
Docstring: Manages a token bucket for rate limiting.
Value: none
Annotation: none
is Public? : true
is Private? : false
Parameters: none
Returns: none
Parent Class: none
|
method | __init__ | fenic._inference.rate_limit_strategy.RateLimitBucket.__init__ | null | site-packages/fenic/_inference/rate_limit_strategy.py | true | false | 31 | 34 | null | null | ["self", "max_capacity"] | RateLimitBucket | null | null |
Type: method
Member Name: __init__
Qualified Name: fenic._inference.rate_limit_strategy.RateLimitBucket.__init__
Docstring: none
Value: none
Annotation: none
is Public? : true
is Private? : false
Parameters: ["self", "max_capacity"]
Returns: none
Parent Class: RateLimitBucket
|
method | _get_available_capacity | fenic._inference.rate_limit_strategy.RateLimitBucket._get_available_capacity | Calculates the available capacity based on the elapsed time and refill rate. | site-packages/fenic/_inference/rate_limit_strategy.py | false | true | 36 | 45 | null | int | ["self", "curr_time"] | RateLimitBucket | null | null |
Type: method
Member Name: _get_available_capacity
Qualified Name: fenic._inference.rate_limit_strategy.RateLimitBucket._get_available_capacity
Docstring: Calculates the available capacity based on the elapsed time and refill rate.
Value: none
Annotation: none
is Public? : false
is Private? : true
Parameters: ["self", "curr_time"]
Returns: int
Parent Class: RateLimitBucket
|
method | _set_capacity | fenic._inference.rate_limit_strategy.RateLimitBucket._set_capacity | Updates the current capacity and last update time. | site-packages/fenic/_inference/rate_limit_strategy.py | false | true | 47 | 50 | null | null | ["self", "capacity", "curr_time"] | RateLimitBucket | null | null |
Type: method
Member Name: _set_capacity
Qualified Name: fenic._inference.rate_limit_strategy.RateLimitBucket._set_capacity
Docstring: Updates the current capacity and last update time.
Value: none
Annotation: none
is Public? : false
is Private? : true
Parameters: ["self", "capacity", "curr_time"]
Returns: none
Parent Class: RateLimitBucket
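The RateLimitBucket records describe a classic token bucket: a constructor taking max_capacity, a private _get_available_capacity that refills "based on the elapsed time and refill rate", and a private _set_capacity that stores a new level with its timestamp. A minimal sketch under the assumption that the bucket refills to full capacity over one minute — the per-minute refill rate and the field names are assumptions; only the method names and signatures come from the table:

```python
import time


class RateLimitBucket:
    """Token bucket sketch: capacity refills continuously over time."""

    def __init__(self, max_capacity: int):
        self.max_capacity = max_capacity
        self.current_capacity = max_capacity
        self.last_update_time = time.monotonic()

    def _get_available_capacity(self, curr_time: float) -> int:
        # Refill proportionally to elapsed time; a full refill over
        # 60 seconds is an assumption for this sketch.
        elapsed = curr_time - self.last_update_time
        refilled = self.current_capacity + elapsed * (self.max_capacity / 60.0)
        return int(min(self.max_capacity, refilled))

    def _set_capacity(self, capacity: int, curr_time: float) -> None:
        # Store the new level together with the time it was observed.
        self.current_capacity = capacity
        self.last_update_time = curr_time
```

Keeping the refill computation (_get_available_capacity) separate from the write (_set_capacity) lets a caller read availability, decide, and then commit a single consolidated update.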
|
class | RateLimitStrategy | fenic._inference.rate_limit_strategy.RateLimitStrategy | Base class for implementing rate limiting strategies for language model requests. This abstract class defines the interface for rate limiting strategies that control both request rate (RPM) and token usage rate (TPM) for language model API calls. Subclasses must implement specific token rate limiting strategies. Attributes: rpm: Requests per minute limit. Must be greater than 0. requests_bucket: Token bucket for tracking and limiting request rate. | site-packages/fenic/_inference/rate_limit_strategy.py | true | false | 53 | 102 | null | null | null | null | null | ["ABC"] |
Type: class
Member Name: RateLimitStrategy
Qualified Name: fenic._inference.rate_limit_strategy.RateLimitStrategy
Docstring: Base class for implementing rate limiting strategies for language model requests.
This abstract class defines the interface for rate limiting strategies that control
both request rate (RPM) and token usage rate (TPM) for language model API calls.
Subclasses must implement specific token rate limiting strategies.
Attributes:
rpm: Requests per minute limit. Must be greater than 0.
requests_bucket: Token bucket for tracking and limiting request rate.
Value: none
Annotation: none
is Public? : true
is Private? : false
Parameters: none
Returns: none
Parent Class: none
|
method | __init__ | fenic._inference.rate_limit_strategy.RateLimitStrategy.__init__ | null | site-packages/fenic/_inference/rate_limit_strategy.py | true | false | 64 | 69 | null | null | ["self", "rpm"] | RateLimitStrategy | null | null |
Type: method
Member Name: __init__
Qualified Name: fenic._inference.rate_limit_strategy.RateLimitStrategy.__init__
Docstring: none
Value: none
Annotation: none
is Public? : true
is Private? : false
Parameters: ["self", "rpm"]
Returns: none
Parent Class: RateLimitStrategy
|
method | backoff | fenic._inference.rate_limit_strategy.RateLimitStrategy.backoff | Backoff the request/token rate limit bucket. | site-packages/fenic/_inference/rate_limit_strategy.py | true | false | 71 | 74 | null | int | ["self", "curr_time"] | RateLimitStrategy | null | null |
Type: method
Member Name: backoff
Qualified Name: fenic._inference.rate_limit_strategy.RateLimitStrategy.backoff
Docstring: Backoff the request/token rate limit bucket.
Value: none
Annotation: none
is Public? : true
is Private? : false
Parameters: ["self", "curr_time"]
Returns: int
Parent Class: RateLimitStrategy
|
method | check_and_consume_rate_limit | fenic._inference.rate_limit_strategy.RateLimitStrategy.check_and_consume_rate_limit | Checks if there is enough capacity in both token and request rate limit buckets. If there is sufficient capacity, this method will consume the required tokens and request quota. This is an abstract method that must be implemented by subclasses. Args: token_estimate: A TokenEstimate object containing the estimated input, output, and total tokens for the request. Returns: bool: True if there was enough capacity and it was consumed, False otherwise. | site-packages/fenic/_inference/rate_limit_strategy.py | true | false | 76 | 90 | null | bool | ["self", "token_estimate"] | RateLimitStrategy | null | null |
Type: method
Member Name: check_and_consume_rate_limit
Qualified Name: fenic._inference.rate_limit_strategy.RateLimitStrategy.check_and_consume_rate_limit
Docstring: Checks if there is enough capacity in both token and request rate limit buckets.
If there is sufficient capacity, this method will consume the required tokens
and request quota. This is an abstract method that must be implemented by subclasses.
Args:
token_estimate: A TokenEstimate object containing the estimated input, output,
and total tokens for the request.
Returns:
bool: True if there was enough capacity and it was consumed, False otherwise.
Value: none
Annotation: none
is Public? : true
is Private? : false
Parameters: ["self", "token_estimate"]
Returns: bool
Parent Class: RateLimitStrategy
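The check-and-consume contract above is all-or-nothing from the caller's point of view: either both the request slot and the token quota are taken, or nothing is, so a caller can simply retry until the method returns True. A hypothetical caller loop — the helper name, the strategy argument, and the sleep policy are all illustrative, not part of the table:

```python
import time


def submit_with_rate_limit(strategy, token_estimate, send_request):
    # Hypothetical helper: spin until the strategy grants capacity,
    # then fire the request. All names here are illustrative.
    while not strategy.check_and_consume_rate_limit(token_estimate):
        time.sleep(0.05)  # back off briefly before retrying
    return send_request()
```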
|
method | context_tokens_per_minute | fenic._inference.rate_limit_strategy.RateLimitStrategy.context_tokens_per_minute | Returns the total token rate limit per minute for this strategy. This is an abstract method that must be implemented by subclasses to specify their token rate limiting behavior. Returns: int: The total number of tokens allowed per minute. | site-packages/fenic/_inference/rate_limit_strategy.py | true | false | 92 | 102 | null | int | ["self"] | RateLimitStrategy | null | null |
Type: method
Member Name: context_tokens_per_minute
Qualified Name: fenic._inference.rate_limit_strategy.RateLimitStrategy.context_tokens_per_minute
Docstring: Returns the total token rate limit per minute for this strategy.
This is an abstract method that must be implemented by subclasses to specify
their token rate limiting behavior.
Returns:
int: The total number of tokens allowed per minute.
Value: none
Annotation: none
is Public? : true
is Private? : false
Parameters: ["self"]
Returns: int
Parent Class: RateLimitStrategy
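Putting the RateLimitStrategy records together: the base class owns the RPM bucket, while token accounting is deferred to subclasses through the two abstract methods. A sketch of that contract, reusing the RateLimitBucket sketch above — only the method names, parameters, return types, and the rpm > 0 requirement come from the table; the backoff behavior shown here is an assumed reading of its one-line docstring:

```python
from abc import ABC, abstractmethod


class RateLimitStrategy(ABC):
    def __init__(self, rpm: int):
        assert rpm > 0, "rpm must be greater than 0"
        self.rpm = rpm
        # The docstring above names a requests_bucket attribute.
        self.requests_bucket = RateLimitBucket(max_capacity=rpm)

    def backoff(self, curr_time: float) -> int:
        # Assumed: drain the request bucket so callers pause; the
        # concrete penalty is not specified by the table.
        self.requests_bucket._set_capacity(0, curr_time)
        return 0

    @abstractmethod
    def check_and_consume_rate_limit(self, token_estimate) -> bool:
        """Consume request + token quota if both buckets have room."""

    @abstractmethod
    def context_tokens_per_minute(self) -> int:
        """Total number of tokens allowed per minute."""
```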
|
class | UnifiedTokenRateLimitStrategy | fenic._inference.rate_limit_strategy.UnifiedTokenRateLimitStrategy | Rate limiting strategy that uses a single token bucket for both input and output tokens. This strategy enforces both a request rate limit (RPM) and a unified token rate limit (TPM) where input and output tokens share the same quota. Attributes: tpm: Total tokens per minute limit. Must be greater than 0. unified_tokens_bucket: Token bucket for tracking and limiting total token usage. | site-packages/fenic/_inference/rate_limit_strategy.py | true | false | 105 | 167 | null | null | null | null | null | ["RateLimitStrategy"] |
Type: class
Member Name: UnifiedTokenRateLimitStrategy
Qualified Name: fenic._inference.rate_limit_strategy.UnifiedTokenRateLimitStrategy
Docstring: Rate limiting strategy that uses a single token bucket for both input and output tokens.
This strategy enforces both a request rate limit (RPM) and a unified token rate limit (TPM)
where input and output tokens share the same quota.
Attributes:
tpm: Total tokens per minute limit. Must be greater than 0.
unified_tokens_bucket: Token bucket for tracking and limiting total token usage.
Value: none
Annotation: none
is Public? : true
is Private? : false
Parameters: none
Returns: none
Parent Class: none
|
method | __init__ | fenic._inference.rate_limit_strategy.UnifiedTokenRateLimitStrategy.__init__ | null | site-packages/fenic/_inference/rate_limit_strategy.py | true | false | 115 | 118 | null | null | ["self", "rpm", "tpm"] | UnifiedTokenRateLimitStrategy | null | null |
Type: method
Member Name: __init__
Qualified Name: fenic._inference.rate_limit_strategy.UnifiedTokenRateLimitStrategy.__init__
Docstring: none
Value: none
Annotation: none
is Public? : true
is Private? : false
Parameters: ["self", "rpm", "tpm"]
Returns: none
Parent Class: UnifiedTokenRateLimitStrategy
|
method | backoff | fenic._inference.rate_limit_strategy.UnifiedTokenRateLimitStrategy.backoff | Backoff the request/token rate limit bucket. | site-packages/fenic/_inference/rate_limit_strategy.py | true | false | 120 | 124 | null | int | ["self", "curr_time"] | UnifiedTokenRateLimitStrategy | null | null |
Type: method
Member Name: backoff
Qualified Name: fenic._inference.rate_limit_strategy.UnifiedTokenRateLimitStrategy.backoff
Docstring: Backoff the request/token rate limit bucket.
Value: none
Annotation: none
is Public? : true
is Private? : false
Parameters: ["self", "curr_time"]
Returns: int
Parent Class: UnifiedTokenRateLimitStrategy
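The unified strategy's docstring states that input and output tokens share one TPM quota alongside the base class's RPM limit. Continuing the RateLimitBucket and RateLimitStrategy sketches above, here is what check_and_consume_rate_limit might look like with a single shared bucket — the consume logic is an assumed implementation of the documented behavior, and the sketch omits the locking a concurrent client would need:

```python
import time


class UnifiedTokenRateLimitStrategy(RateLimitStrategy):
    def __init__(self, rpm: int, tpm: int):
        super().__init__(rpm=rpm)
        assert tpm > 0, "tpm must be greater than 0"
        self.tpm = tpm
        # One bucket covers input + output tokens, per the docstring.
        self.unified_tokens_bucket = RateLimitBucket(max_capacity=tpm)

    def check_and_consume_rate_limit(self, token_estimate) -> bool:
        now = time.monotonic()
        requests = self.requests_bucket._get_available_capacity(now)
        tokens = self.unified_tokens_bucket._get_available_capacity(now)
        if requests < 1 or tokens < token_estimate.total_tokens:
            return False
        # Consume one request slot and the full (input + output) estimate.
        self.requests_bucket._set_capacity(requests - 1, now)
        self.unified_tokens_bucket._set_capacity(
            tokens - token_estimate.total_tokens, now
        )
        return True

    def context_tokens_per_minute(self) -> int:
        return self.tpm
```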
|