"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

from __future__ import annotations

from .assistantmessage import AssistantMessage, AssistantMessageTypedDict
from .prediction import Prediction, PredictionTypedDict
from .responseformat import ResponseFormat, ResponseFormatTypedDict
from .systemmessage import SystemMessage, SystemMessageTypedDict
from .tool import Tool, ToolTypedDict
from .toolchoice import ToolChoice, ToolChoiceTypedDict
from .toolchoiceenum import ToolChoiceEnum
from .toolmessage import ToolMessage, ToolMessageTypedDict
from .usermessage import UserMessage, UserMessageTypedDict
from mistralai_gcp.types import (
    BaseModel,
    Nullable,
    OptionalNullable,
    UNSET,
    UNSET_SENTINEL,
)
from mistralai_gcp.utils import get_discriminator
from pydantic import Discriminator, Tag, model_serializer
from typing import List, Optional, Union
from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict


StopTypedDict = TypeAliasType("StopTypedDict", Union[str, List[str]])
r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array"""


Stop = TypeAliasType("Stop", Union[str, List[str]])
r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array"""


MessagesTypedDict = TypeAliasType(
    "MessagesTypedDict",
    Union[
        SystemMessageTypedDict,
        UserMessageTypedDict,
        AssistantMessageTypedDict,
        ToolMessageTypedDict,
    ],
)


Messages = Annotated[
    Union[
        Annotated[AssistantMessage, Tag("assistant")],
        Annotated[SystemMessage, Tag("system")],
        Annotated[ToolMessage, Tag("tool")],
        Annotated[UserMessage, Tag("user")],
    ],
    Discriminator(lambda m: get_discriminator(m, "role", "role")),
]
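
# Illustrative note (not generated code): `Messages` is a discriminated union.
# During validation, get_discriminator reads each entry's "role" field and routes
# it to the model matching the Tag above, so a dict such as
# {"role": "user", "content": "Hello"} validates as a UserMessage.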


ChatCompletionStreamRequestToolChoiceTypedDict = TypeAliasType(
    "ChatCompletionStreamRequestToolChoiceTypedDict",
    Union[ToolChoiceTypedDict, ToolChoiceEnum],
)


ChatCompletionStreamRequestToolChoice = TypeAliasType(
    "ChatCompletionStreamRequestToolChoice", Union[ToolChoice, ToolChoiceEnum]
)
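
# Illustrative note (not generated code): `tool_choice` accepts either a
# ToolChoiceEnum string value (such as "auto" or "none"; see toolchoiceenum.py
# for the authoritative list) or a ToolChoice object targeting a specific tool.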


class ChatCompletionStreamRequestTypedDict(TypedDict):
    model: str
    r"""ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions."""
    messages: List[MessagesTypedDict]
    r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content."""
    temperature: NotRequired[Nullable[float]]
    r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value."""
    top_p: NotRequired[float]
    r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both."""
    max_tokens: NotRequired[Nullable[int]]
    r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length."""
    stream: NotRequired[bool]
    stop: NotRequired[StopTypedDict]
    r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array"""
    random_seed: NotRequired[Nullable[int]]
    r"""The seed to use for random sampling. If set, different calls will generate deterministic results."""
    response_format: NotRequired[ResponseFormatTypedDict]
    tools: NotRequired[Nullable[List[ToolTypedDict]]]
    tool_choice: NotRequired[ChatCompletionStreamRequestToolChoiceTypedDict]
    presence_penalty: NotRequired[float]
    r"""presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative."""
    frequency_penalty: NotRequired[float]
    r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition."""
    n: NotRequired[Nullable[int]]
    r"""Number of completions to return for each request, input tokens are only billed once."""
    prediction: NotRequired[PredictionTypedDict]
    parallel_tool_calls: NotRequired[bool]


class ChatCompletionStreamRequest(BaseModel):
    model: str
    r"""ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions."""

    messages: List[Messages]
    r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content."""

    temperature: OptionalNullable[float] = UNSET
    r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value."""

    top_p: Optional[float] = None
    r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both."""

    max_tokens: OptionalNullable[int] = UNSET
    r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length."""

    stream: Optional[bool] = True

    stop: Optional[Stop] = None
    r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array"""

    random_seed: OptionalNullable[int] = UNSET
    r"""The seed to use for random sampling. If set, different calls will generate deterministic results."""

    response_format: Optional[ResponseFormat] = None

    tools: OptionalNullable[List[Tool]] = UNSET

    tool_choice: Optional[ChatCompletionStreamRequestToolChoice] = None

    presence_penalty: Optional[float] = None
    r"""presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative."""

    frequency_penalty: Optional[float] = None
    r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition."""

    n: OptionalNullable[int] = UNSET
    r"""Number of completions to return for each request, input tokens are only billed once."""

    prediction: Optional[Prediction] = None

    parallel_tool_calls: Optional[bool] = None

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        optional_fields = [
            "temperature",
            "top_p",
            "max_tokens",
            "stream",
            "stop",
            "random_seed",
            "response_format",
            "tools",
            "tool_choice",
            "presence_penalty",
            "frequency_penalty",
            "n",
            "prediction",
            "parallel_tool_calls",
        ]
        nullable_fields = ["temperature", "max_tokens", "random_seed", "tools", "n"]
        null_default_fields = []

        serialized = handler(self)

        m = {}

        for n, f in self.model_fields.items():
            k = f.alias or n
            val = serialized.get(k)
            serialized.pop(k, None)

            optional_nullable = k in optional_fields and k in nullable_fields
            is_set = (
                self.__pydantic_fields_set__.intersection({n})
                or k in null_default_fields
            )  # pylint: disable=no-member

            if val is not None and val != UNSET_SENTINEL:
                m[k] = val
            elif val != UNSET_SENTINEL and (
                not k in optional_fields or (optional_nullable and is_set)
            ):
                m[k] = val

        return m
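

if __name__ == "__main__":
    # Hedged usage sketch (not part of the generated module). The model name
    # and message content below are illustrative assumptions, not values
    # defined by this file.
    example = ChatCompletionStreamRequest(
        model="mistral-small-latest",
        messages=[{"role": "user", "content": "Say hello."}],
        temperature=0.3,
    )
    # serialize_model drops optional fields left at UNSET/None, so the dump
    # contains only model, messages, temperature, and the stream default.
    print(example.model_dump(by_alias=True))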