
pydantic_ai.usage

RequestUsage dataclass

Bases: UsageBase

LLM usage associated with a single request.

This is an implementation of genai_prices.types.AbstractUsage so it can be used to calculate the price of the request using genai-prices.

Source code in pydantic_ai_slim/pydantic_ai/usage.py
@dataclass(repr=False)
class RequestUsage(UsageBase):
    """LLM usage associated with a single request.

    This is an implementation of `genai_prices.types.AbstractUsage` so it can be used to calculate the price of the
    request using [genai-prices](https://github.com/pydantic/genai-prices).
    """

    @property
    def requests(self):
        return 1

    def incr(self, incr_usage: RequestUsage) -> None:
        """Increment the usage in place.

        Args:
            incr_usage: The usage to increment by.
        """
        return _incr_usage_tokens(self, incr_usage)

    def __add__(self, other: RequestUsage) -> RequestUsage:
        """Add two RequestUsages together.

        This is provided so it's trivial to sum usage information from multiple parts of a response.

        **WARNING:** this CANNOT be used to sum multiple requests without breaking some pricing calculations.
        """
        new_usage = copy(self)
        new_usage.incr(other)
        return new_usage
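
For illustration, a RequestUsage can be constructed and inspected directly. This is a minimal sketch: the token field names (input_tokens, output_tokens) come from UsageBase, as listed for RunUsage below, and the values are made up.

from pydantic_ai.usage import RequestUsage

# Usage for a single model request; the token fields are inherited from UsageBase.
usage = RequestUsage(input_tokens=120, output_tokens=42)

print(usage.requests)  # always 1: a RequestUsage covers exactly one request
# Because RequestUsage implements genai_prices.types.AbstractUsage, instances
# can also be fed to genai-prices to estimate the request's cost.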

incr

incr(incr_usage: RequestUsage) -> None

Increment the usage in place.

Parameters:

incr_usage (RequestUsage): The usage to increment by. Required.
Source code in pydantic_ai_slim/pydantic_ai/usage.py
def incr(self, incr_usage: RequestUsage) -> None:
    """Increment the usage in place.

    Args:
        incr_usage: The usage to increment by.
    """
    return _incr_usage_tokens(self, incr_usage)

__add__

__add__(other: RequestUsage) -> RequestUsage

Add two RequestUsages together.

This is provided so it's trivial to sum usage information from multiple parts of a response.

WARNING: this CANNOT be used to sum multiple requests without breaking some pricing calculations.

Source code in pydantic_ai_slim/pydantic_ai/usage.py
def __add__(self, other: RequestUsage) -> RequestUsage:
    """Add two RequestUsages together.

    This is provided so it's trivial to sum usage information from multiple parts of a response.

    **WARNING:** this CANNOT be used to sum multiple requests without breaking some pricing calculations.
    """
    new_usage = copy(self)
    new_usage.incr(other)
    return new_usage
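
To make the warning concrete: adding RequestUsage values from different requests collapses them into an object that still reports a single request, which can skew any pricing logic that depends on the request count. A short sketch with made-up token numbers:

from pydantic_ai.usage import RequestUsage

a = RequestUsage(input_tokens=100, output_tokens=10)
b = RequestUsage(input_tokens=80, output_tokens=5)

summed = a + b
print(summed.requests)  # 1, not 2: the `requests` property is hard-coded
# Cross-request totals belong in RunUsage, which tracks `requests` explicitly.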

RunUsage dataclass

Bases: UsageBase

LLM usage associated with an agent run.

Responsibility for calculating request usage is on the model; Pydantic AI simply sums the usage information across requests.

Source code in pydantic_ai_slim/pydantic_ai/usage.py
@dataclass(repr=False)
class RunUsage(UsageBase):
    """LLM usage associated with an agent run.

    Responsibility for calculating request usage is on the model; Pydantic AI simply sums the usage information across requests.
    """

    requests: int = 0
    """Number of requests made to the LLM API."""

    input_tokens: int = 0
    """Total number of text input/prompt tokens."""

    cache_write_tokens: int = 0
    """Total number of tokens written to the cache."""
    cache_read_tokens: int = 0
    """Total number of tokens read from the cache."""

    input_audio_tokens: int = 0
    """Total number of audio input tokens."""
    cache_audio_read_tokens: int = 0
    """Total number of audio tokens read from the cache."""

    output_tokens: int = 0
    """Total number of text output/completion tokens."""

    details: dict[str, int] = dataclasses.field(default_factory=dict)
    """Any extra details returned by the model."""

    def incr(self, incr_usage: RunUsage | RequestUsage) -> None:
        """Increment the usage in place.

        Args:
            incr_usage: The usage to increment by.
        """
        if isinstance(incr_usage, RunUsage):
            self.requests += incr_usage.requests
        return _incr_usage_tokens(self, incr_usage)

    def __add__(self, other: RunUsage | RequestUsage) -> RunUsage:
        """Add two RunUsages together.

        This is provided so it's trivial to sum usage information from multiple runs.
        """
        new_usage = copy(self)
        new_usage.incr(other)
        return new_usage
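
For illustration, the usual way to obtain a RunUsage is from a run result rather than by constructing one by hand. A minimal sketch, assuming the result object's usage() accessor from the Pydantic AI docs and a placeholder model name:

from pydantic_ai import Agent

agent = Agent('openai:gpt-4o')  # placeholder model
result = agent.run_sync('What is the capital of France?')

usage = result.usage()  # a RunUsage accumulated across the run's requests
print(usage.requests, usage.input_tokens, usage.output_tokens)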

requests class-attribute instance-attribute

requests: int = 0

Number of requests made to the LLM API.

input_tokens class-attribute instance-attribute

input_tokens: int = 0

Total number of text input/prompt tokens.

cache_write_tokens class-attribute instance-attribute

cache_write_tokens: int = 0

Total number of tokens written to the cache.

cache_read_tokens class-attribute instance-attribute

cache_read_tokens: int = 0

Total number of tokens read from the cache.

input_audio_tokens class-attribute instance-attribute

input_audio_tokens: int = 0

Total number of audio input tokens.

cache_audio_read_tokens class-attribute instance-attribute

cache_audio_read_tokens: int = 0

Total number of audio tokens read from the cache.

output_tokens class-attribute instance-attribute

output_tokens: int = 0

Total number of text output/completion tokens.

details class-attribute instance-attribute

details: dict[str, int] = field(default_factory=dict)

Any extra details returned by the model.

incr

incr(incr_usage: RunUsage | RequestUsage) -> None

Increment the usage in place.

Parameters:

incr_usage (RunUsage | RequestUsage): The usage to increment by. Required.
Source code in pydantic_ai_slim/pydantic_ai/usage.py
def incr(self, incr_usage: RunUsage | RequestUsage) -> None:
    """Increment the usage in place.

    Args:
        incr_usage: The usage to increment by.
    """
    if isinstance(incr_usage, RunUsage):
        self.requests += incr_usage.requests
    return _incr_usage_tokens(self, incr_usage)
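
A sketch of accumulating per-request usage into a run total. Note from the source above that incr() only carries the request count over from another RunUsage, so when folding in RequestUsage values the counter is bumped separately (token numbers are made up):

from pydantic_ai.usage import RequestUsage, RunUsage

run = RunUsage()
for req in (
    RequestUsage(input_tokens=100, output_tokens=10),
    RequestUsage(input_tokens=80, output_tokens=5),
):
    run.incr(req)      # sums the token fields
    run.requests += 1  # incr() does not infer this from a RequestUsage

print(run.requests, run.input_tokens, run.output_tokens)  # 2 180 15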

__add__

__add__(other: RunUsage | RequestUsage) -> RunUsage

Add two RunUsages together.

This is provided so it's trivial to sum usage information from multiple runs.

Source code in pydantic_ai_slim/pydantic_ai/usage.py
def __add__(self, other: RunUsage | RequestUsage) -> RunUsage:
    """Add two RunUsages together.

    This is provided so it's trivial to sum usage information from multiple runs.
    """
    new_usage = copy(self)
    new_usage.incr(other)
    return new_usage
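
For example, totalling the usage of two separate runs (a sketch, reusing the hypothetical agent from above):

usage_1 = agent.run_sync('First question').usage()
usage_2 = agent.run_sync('Second question').usage()

combined = usage_1 + usage_2  # request counts and token counts are both summed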

Usage dataclass deprecated

Bases: RunUsage

Deprecated

Usage is deprecated, use RunUsage instead

Deprecated alias for RunUsage.

Source code in pydantic_ai_slim/pydantic_ai/usage.py
@dataclass
@deprecated('`Usage` is deprecated, use `RunUsage` instead')
class Usage(RunUsage):
    """Deprecated alias for `RunUsage`."""

UsageLimits dataclass

Limits on model usage.

The request count is tracked by pydantic_ai, and the request limit is checked before each request to the model. Token counts are provided in responses from the model, and the token limits are checked after each response.

Each of the limits can be set to None to disable that limit.

Source code in pydantic_ai_slim/pydantic_ai/usage.py
@dataclass(repr=False)
class UsageLimits:
    """Limits on model usage.

    The request count is tracked by pydantic_ai, and the request limit is checked before each request to the model.
    Token counts are provided in responses from the model, and the token limits are checked after each response.

    Each of the limits can be set to `None` to disable that limit.
    """

    request_limit: int | None = 50
    """The maximum number of requests allowed to the model."""
    input_tokens_limit: int | None = None
    """The maximum number of input/prompt tokens allowed."""
    output_tokens_limit: int | None = None
    """The maximum number of output/response tokens allowed."""
    total_tokens_limit: int | None = None
    """The maximum number of tokens allowed in requests and responses combined."""
    count_tokens_before_request: bool = False
    """If True, perform a token counting pass before sending the request to the model,
    to enforce `input_tokens_limit` ahead of time. This may incur additional overhead
    (from calling the model's `count_tokens` API before making the actual request) and is disabled by default."""

    @property
    @deprecated('`request_tokens_limit` is deprecated, use `input_tokens_limit` instead')
    def request_tokens_limit(self) -> int | None:
        return self.input_tokens_limit

    @property
    @deprecated('`response_tokens_limit` is deprecated, use `output_tokens_limit` instead')
    def response_tokens_limit(self) -> int | None:
        return self.output_tokens_limit

    @overload
    def __init__(
        self,
        *,
        request_limit: int | None = 50,
        input_tokens_limit: int | None = None,
        output_tokens_limit: int | None = None,
        total_tokens_limit: int | None = None,
        count_tokens_before_request: bool = False,
    ) -> None:
        self.request_limit = request_limit
        self.input_tokens_limit = input_tokens_limit
        self.output_tokens_limit = output_tokens_limit
        self.total_tokens_limit = total_tokens_limit
        self.count_tokens_before_request = count_tokens_before_request

    @overload
    @deprecated(
        'Use `input_tokens_limit` instead of `request_tokens_limit` and `output_tokens_limit` and `total_tokens_limit`'
    )
    def __init__(
        self,
        *,
        request_limit: int | None = 50,
        request_tokens_limit: int | None = None,
        response_tokens_limit: int | None = None,
        total_tokens_limit: int | None = None,
        count_tokens_before_request: bool = False,
    ) -> None:
        self.request_limit = request_limit
        self.input_tokens_limit = request_tokens_limit
        self.output_tokens_limit = response_tokens_limit
        self.total_tokens_limit = total_tokens_limit
        self.count_tokens_before_request = count_tokens_before_request

    def __init__(
        self,
        *,
        request_limit: int | None = 50,
        input_tokens_limit: int | None = None,
        output_tokens_limit: int | None = None,
        total_tokens_limit: int | None = None,
        count_tokens_before_request: bool = False,
        # deprecated:
        request_tokens_limit: int | None = None,
        response_tokens_limit: int | None = None,
    ):
        self.request_limit = request_limit
        self.input_tokens_limit = input_tokens_limit or request_tokens_limit
        self.output_tokens_limit = output_tokens_limit or response_tokens_limit
        self.total_tokens_limit = total_tokens_limit
        self.count_tokens_before_request = count_tokens_before_request

    def has_token_limits(self) -> bool:
        """Returns `True` if this instance places any limits on token counts.

        If this returns `False`, the `check_tokens` method will never raise an error.

        This is useful because if we have token limits, we need to check them after receiving each streamed message.
        If there are no limits, we can skip that processing in the streaming response iterator.
        """
        return any(
            limit is not None for limit in (self.input_tokens_limit, self.output_tokens_limit, self.total_tokens_limit)
        )

    def check_before_request(self, usage: RunUsage) -> None:
        """Raises a `UsageLimitExceeded` exception if the next request would exceed any of the limits."""
        request_limit = self.request_limit
        if request_limit is not None and usage.requests >= request_limit:
            raise UsageLimitExceeded(f'The next request would exceed the request_limit of {request_limit}')

        input_tokens = usage.input_tokens
        if self.input_tokens_limit is not None and input_tokens > self.input_tokens_limit:
            raise UsageLimitExceeded(
                f'The next request would exceed the input_tokens_limit of {self.input_tokens_limit} ({input_tokens=})'
            )

        total_tokens = usage.total_tokens
        if self.total_tokens_limit is not None and total_tokens > self.total_tokens_limit:
            raise UsageLimitExceeded(
                f'The next request would exceed the total_tokens_limit of {self.total_tokens_limit} ({total_tokens=})'
            )

    def check_tokens(self, usage: RunUsage) -> None:
        """Raises a `UsageLimitExceeded` exception if the usage exceeds any of the token limits."""
        input_tokens = usage.input_tokens
        if self.input_tokens_limit is not None and input_tokens > self.input_tokens_limit:
            raise UsageLimitExceeded(f'Exceeded the input_tokens_limit of {self.input_tokens_limit} ({input_tokens=})')

        output_tokens = usage.output_tokens
        if self.output_tokens_limit is not None and output_tokens > self.output_tokens_limit:
            raise UsageLimitExceeded(
                f'Exceeded the output_tokens_limit of {self.output_tokens_limit} ({output_tokens=})'
            )

        total_tokens = usage.total_tokens
        if self.total_tokens_limit is not None and total_tokens > self.total_tokens_limit:
            raise UsageLimitExceeded(f'Exceeded the total_tokens_limit of {self.total_tokens_limit} ({total_tokens=})')

    __repr__ = _utils.dataclasses_no_defaults_repr
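
In practice, limits are enforced by passing a UsageLimits instance to an agent run via the usage_limits argument, as in the Pydantic AI agents documentation. A minimal sketch with a placeholder model and prompt:

from pydantic_ai import Agent
from pydantic_ai.exceptions import UsageLimitExceeded
from pydantic_ai.usage import UsageLimits

agent = Agent('openai:gpt-4o')  # placeholder model
try:
    result = agent.run_sync(
        'Summarise the history of Rome.',
        usage_limits=UsageLimits(request_limit=5, total_tokens_limit=2000),
    )
except UsageLimitExceeded as e:
    print(e)  # raised as soon as a limit would be (or has been) exceeded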

request_limit class-attribute instance-attribute

request_limit: int | None = 50

The maximum number of requests allowed to the model.

input_tokens_limit class-attribute instance-attribute

input_tokens_limit: int | None = None

The maximum number of input/prompt tokens allowed.

output_tokens_limit class-attribute instance-attribute

output_tokens_limit: int | None = None

The maximum number of output/response tokens allowed.

total_tokens_limit class-attribute instance-attribute

total_tokens_limit: int | None = None

The maximum number of tokens allowed in requests and responses combined.

count_tokens_before_request class-attribute instance-attribute

count_tokens_before_request: bool = False

If True, perform a token counting pass before sending the request to the model, to enforce input_tokens_limit ahead of time. This may incur additional overhead (from calling the model's count_tokens API before making the actual request) and is disabled by default.

has_token_limits

has_token_limits() -> bool

Returns True if this instance places any limits on token counts.

If this returns False, the check_tokens method will never raise an error.

This is useful because if we have token limits, we need to check them after receiving each streamed message. If there are no limits, we can skip that processing in the streaming response iterator.

Source code in pydantic_ai_slim/pydantic_ai/usage.py
def has_token_limits(self) -> bool:
    """Returns `True` if this instance places any limits on token counts.

    If this returns `False`, the `check_tokens` method will never raise an error.

    This is useful because if we have token limits, we need to check them after receiving each streamed message.
    If there are no limits, we can skip that processing in the streaming response iterator.
    """
    return any(
        limit is not None for limit in (self.input_tokens_limit, self.output_tokens_limit, self.total_tokens_limit)
    )
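
For example, the defaults only set request_limit, so a fresh instance reports no token limits:

from pydantic_ai.usage import UsageLimits

assert not UsageLimits().has_token_limits()
assert UsageLimits(output_tokens_limit=100).has_token_limits()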

check_before_request

check_before_request(usage: RunUsage) -> None

Raises a UsageLimitExceeded exception if the next request would exceed any of the limits.

Source code in pydantic_ai_slim/pydantic_ai/usage.py
def check_before_request(self, usage: RunUsage) -> None:
    """Raises a `UsageLimitExceeded` exception if the next request would exceed any of the limits."""
    request_limit = self.request_limit
    if request_limit is not None and usage.requests >= request_limit:
        raise UsageLimitExceeded(f'The next request would exceed the request_limit of {request_limit}')

    input_tokens = usage.input_tokens
    if self.input_tokens_limit is not None and input_tokens > self.input_tokens_limit:
        raise UsageLimitExceeded(
            f'The next request would exceed the input_tokens_limit of {self.input_tokens_limit} ({input_tokens=})'
        )

    total_tokens = usage.total_tokens
    if self.total_tokens_limit is not None and total_tokens > self.total_tokens_limit:
        raise UsageLimitExceeded(
            f'The next request would exceed the total_tokens_limit of {self.total_tokens_limit} ({total_tokens=})'
        )
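
A sketch of calling the check manually, grounded in the source above (normally the agent performs this before each model request):

from pydantic_ai.exceptions import UsageLimitExceeded
from pydantic_ai.usage import RunUsage, UsageLimits

limits = UsageLimits(request_limit=1)
try:
    limits.check_before_request(RunUsage(requests=1))
except UsageLimitExceeded as e:
    print(e)  # The next request would exceed the request_limit of 1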

check_tokens

check_tokens(usage: RunUsage) -> None

Raises a UsageLimitExceeded exception if the usage exceeds any of the token limits.

Source code in pydantic_ai_slim/pydantic_ai/usage.py
def check_tokens(self, usage: RunUsage) -> None:
    """Raises a `UsageLimitExceeded` exception if the usage exceeds any of the token limits."""
    input_tokens = usage.input_tokens
    if self.input_tokens_limit is not None and input_tokens > self.input_tokens_limit:
        raise UsageLimitExceeded(f'Exceeded the input_tokens_limit of {self.input_tokens_limit} ({input_tokens=})')

    output_tokens = usage.output_tokens
    if self.output_tokens_limit is not None and output_tokens > self.output_tokens_limit:
        raise UsageLimitExceeded(
            f'Exceeded the output_tokens_limit of {self.output_tokens_limit} ({output_tokens=})'
        )

    total_tokens = usage.total_tokens
    if self.total_tokens_limit is not None and total_tokens > self.total_tokens_limit:
        raise UsageLimitExceeded(f'Exceeded the total_tokens_limit of {self.total_tokens_limit} ({total_tokens=})')
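
And a matching sketch for the post-response check, assuming total_tokens sums input and output tokens as in UsageBase:

from pydantic_ai.exceptions import UsageLimitExceeded
from pydantic_ai.usage import RunUsage, UsageLimits

limits = UsageLimits(total_tokens_limit=500)
usage = RunUsage(input_tokens=400, output_tokens=200)  # total_tokens == 600
try:
    limits.check_tokens(usage)
except UsageLimitExceeded as e:
    print(e)  # Exceeded the total_tokens_limit of 500 (total_tokens=600)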