From fbb66aacb3760dc7bb558383730f140712068d13 Mon Sep 17 00:00:00 2001 From: "fern-api[bot]" <115122769+fern-api[bot]@users.noreply.github.com> Date: Tue, 13 Jan 2026 19:29:44 +0000 Subject: [PATCH 1/2] SDK regeneration --- .fern/metadata.json | 2 +- .mock/definition/api.yml | 18 - .../definition/empathic-voice/__package__.yml | 3222 ----------------- .mock/definition/empathic-voice/chat.yml | 149 - .../definition/empathic-voice/chatGroups.yml | 623 ---- .../empathic-voice/chatWebhooks.yml | 58 - .mock/definition/empathic-voice/chats.yml | 503 --- .mock/definition/empathic-voice/configs.yml | 835 ----- .../empathic-voice/controlPlane.yml | 72 - .mock/definition/empathic-voice/prompts.yml | 549 --- .mock/definition/empathic-voice/tools.yml | 617 ---- .../expression-measurement/__package__.yml | 1 - .../batch/__package__.yml | 1814 ---------- .../stream/__package__.yml | 113 - .../expression-measurement/stream/stream.yml | 437 --- .mock/definition/tts/__package__.yml | 928 ----- .mock/definition/tts/streamInput.yml | 96 - .mock/definition/tts/voices.yml | 140 - .mock/fern.config.json | 4 - LICENSE | 2 +- poetry.lock | 533 ++- pyproject.toml | 4 +- reference.md | 96 +- src/hume/core/client_wrapper.py | 4 +- src/hume/empathic_voice/chat/client.py.diff | 87 - .../empathic_voice/chat/raw_client.py.diff | 176 - .../empathic_voice/chat/socket_client.py.diff | 165 - src/hume/empathic_voice/client.py.diff | 203 -- .../batch/types/inference_job.py.diff | 24 - .../expression_measurement/client.py.diff | 70 - .../stream/stream/socket_client.py.diff | 170 - tests/wire/test_tts.py | 18 +- 32 files changed, 316 insertions(+), 11417 deletions(-) delete mode 100644 .mock/definition/api.yml delete mode 100644 .mock/definition/empathic-voice/__package__.yml delete mode 100644 .mock/definition/empathic-voice/chat.yml delete mode 100644 .mock/definition/empathic-voice/chatGroups.yml delete mode 100644 .mock/definition/empathic-voice/chatWebhooks.yml delete mode 100644 .mock/definition/empathic-voice/chats.yml delete mode 100644 .mock/definition/empathic-voice/configs.yml delete mode 100644 .mock/definition/empathic-voice/controlPlane.yml delete mode 100644 .mock/definition/empathic-voice/prompts.yml delete mode 100644 .mock/definition/empathic-voice/tools.yml delete mode 100644 .mock/definition/expression-measurement/__package__.yml delete mode 100644 .mock/definition/expression-measurement/batch/__package__.yml delete mode 100644 .mock/definition/expression-measurement/stream/__package__.yml delete mode 100644 .mock/definition/expression-measurement/stream/stream.yml delete mode 100644 .mock/definition/tts/__package__.yml delete mode 100644 .mock/definition/tts/streamInput.yml delete mode 100644 .mock/definition/tts/voices.yml delete mode 100644 .mock/fern.config.json delete mode 100644 src/hume/empathic_voice/chat/client.py.diff delete mode 100644 src/hume/empathic_voice/chat/raw_client.py.diff delete mode 100644 src/hume/empathic_voice/chat/socket_client.py.diff delete mode 100644 src/hume/empathic_voice/client.py.diff delete mode 100644 src/hume/expression_measurement/batch/types/inference_job.py.diff delete mode 100644 src/hume/expression_measurement/client.py.diff delete mode 100644 src/hume/expression_measurement/stream/stream/socket_client.py.diff diff --git a/.fern/metadata.json b/.fern/metadata.json index 5dea0024..12de21b0 100644 --- a/.fern/metadata.json +++ b/.fern/metadata.json @@ -64,5 +64,5 @@ } ] }, - "sdkVersion": "0.13.6" + "sdkVersion": "0.13.7" } \ No newline at end of file diff --git 
a/.mock/definition/api.yml b/.mock/definition/api.yml deleted file mode 100644 index 4ae27d7b..00000000 --- a/.mock/definition/api.yml +++ /dev/null @@ -1,18 +0,0 @@ -name: api -error-discrimination: - strategy: status-code -default-environment: prod -default-url: Base -environments: - prod: - urls: - Base: https://api.hume.ai/ - evi: wss://api.hume.ai/v0/evi - tts: wss://api.hume.ai/v0/tts - stream: wss://api.hume.ai/v0/stream -auth: HeaderAuthScheme -auth-schemes: - HeaderAuthScheme: - header: X-Hume-Api-Key - type: optional - name: apiKey diff --git a/.mock/definition/empathic-voice/__package__.yml b/.mock/definition/empathic-voice/__package__.yml deleted file mode 100644 index 435263c7..00000000 --- a/.mock/definition/empathic-voice/__package__.yml +++ /dev/null @@ -1,3222 +0,0 @@ -errors: - UnprocessableEntityError: - status-code: 422 - type: HTTPValidationError - docs: Validation Error - examples: - - value: {} - BadRequestError: - status-code: 400 - type: ErrorResponse - docs: Bad Request - examples: - - value: {} -types: - AssistantEnd: - docs: When provided, the output is an assistant end message. - properties: - custom_session_id: - type: optional - docs: >- - Used to manage conversational state, correlate frontend and backend - data, and persist conversations across EVI sessions. - type: - type: literal<"assistant_end"> - docs: >- - The type of message sent through the socket; for an Assistant End - message, this must be `assistant_end`. - - - This message indicates the conclusion of the assistant's response, - signaling that the assistant has finished speaking for the current - conversational turn. - source: - openapi: evi-asyncapi.json - AssistantInput: - docs: When provided, the input is spoken by EVI. - properties: - custom_session_id: - type: optional - docs: >- - Used to manage conversational state, correlate frontend and backend - data, and persist conversations across EVI sessions. - text: - type: string - docs: >- - Assistant text to synthesize into spoken audio and insert into the - conversation. - - - EVI uses this text to generate spoken audio using our proprietary - expressive text-to-speech model. Our model adds appropriate emotional - inflections and tones to the text based on the user's expressions and - the context of the conversation. The synthesized audio is streamed - back to the user as an [Assistant - Message](/reference/speech-to-speech-evi/chat#receive.AssistantMessage). - type: - type: literal<"assistant_input"> - docs: >- - The type of message sent through the socket; must be `assistant_input` - for our server to correctly identify and process it as an Assistant - Input message. - source: - openapi: evi-openapi.json - AssistantMessage: - docs: When provided, the output is an assistant message. - properties: - custom_session_id: - type: optional - docs: >- - Used to manage conversational state, correlate frontend and backend - data, and persist conversations across EVI sessions. - from_text: - type: boolean - docs: >- - Indicates if this message was inserted into the conversation as text - from an [Assistant Input - message](/reference/speech-to-speech-evi/chat#send.AssistantInput.text). - id: - type: optional - docs: >- - ID of the assistant message. Allows the Assistant Message to be - tracked and referenced. - language: - type: optional - docs: Detected language of the message text. - message: - type: ChatMessage - docs: Transcript of the message. - models: - type: Inference - docs: Inference model results. 
- type: - type: literal<"assistant_message"> - docs: >- - The type of message sent through the socket; for an Assistant Message, - this must be `assistant_message`. - - - This message contains both a transcript of the assistant's response - and the expression measurement predictions of the assistant's audio - output. - source: - openapi: evi-asyncapi.json - AssistantProsody: - docs: When provided, the output is an Assistant Prosody message. - properties: - custom_session_id: - type: optional - docs: >- - Used to manage conversational state, correlate frontend and backend - data, and persist conversations across EVI sessions. - id: - type: optional - docs: Unique identifier for the segment. - models: - type: Inference - docs: Inference model results. - type: - type: literal<"assistant_prosody"> - docs: >- - The type of message sent through the socket; for an Assistant Prosody - message, this must be `assistant_PROSODY`. - - - This message the expression measurement predictions of the assistant's - audio output. - source: - openapi: evi-asyncapi.json - AudioConfiguration: - properties: - channels: - type: integer - docs: Number of audio channels. - codec: - type: optional - docs: Optional codec information. - encoding: - type: Encoding - docs: Encoding format of the audio input, such as `linear16`. - sample_rate: - type: integer - docs: >- - Audio sample rate. Number of samples per second in the audio input, - measured in Hertz. - source: - openapi: evi-openapi.json - AudioInput: - docs: When provided, the input is audio. - properties: - custom_session_id: - type: optional - docs: >- - Used to manage conversational state, correlate frontend and backend - data, and persist conversations across EVI sessions. - data: - type: string - docs: >- - Base64 encoded audio input to insert into the conversation. - - - The content of an Audio Input message is treated as the user's speech - to EVI and must be streamed continuously. Pre-recorded audio files are - not supported. - - - For optimal transcription quality, the audio data should be - transmitted in small chunks. - - - Hume recommends streaming audio with a buffer window of 20 - milliseconds (ms), or 100 milliseconds (ms) for web applications. - type: - type: literal<"audio_input"> - docs: >- - The type of message sent through the socket; must be `audio_input` for - our server to correctly identify and process it as an Audio Input - message. - - - This message is used for sending audio input data to EVI for - processing and expression measurement. Audio data should be sent as a - continuous stream, encoded in Base64. - source: - openapi: evi-openapi.json - AudioOutput: - docs: >- - The type of message sent through the socket; for an Audio Output message, - this must be `audio_output`. - properties: - custom_session_id: - type: optional - docs: >- - Used to manage conversational state, correlate frontend and backend - data, and persist conversations across EVI sessions. - data: - type: string - docs: >- - Base64 encoded audio output. This encoded audio is transmitted to the - client, where it can be decoded and played back as part of the user - interaction. - id: - type: string - docs: >- - ID of the audio output. Allows the Audio Output message to be tracked - and referenced. - index: - type: integer - docs: Index of the chunk of audio relative to the whole audio segment. - type: - type: literal<"audio_output"> - docs: >- - The type of message sent through the socket; for an Audio Output - message, this must be `audio_output`. 
- source: - openapi: evi-asyncapi.json - BuiltInTool: - enum: - - web_search - - hang_up - source: - openapi: evi-openapi.json - BuiltinToolConfig: - properties: - fallback_content: - type: optional - docs: >- - Optional text passed to the supplemental LLM if the tool call fails. - The LLM then uses this text to generate a response back to the user, - ensuring continuity in the conversation. - name: - type: BuiltInTool - source: - openapi: evi-openapi.json - ChatMessageToolResult: - discriminated: false - docs: Function call response from client. - union: - - type: ToolResponseMessage - - type: ToolErrorMessage - source: - openapi: evi-asyncapi.json - inline: true - ChatMessage: - properties: - content: - type: optional - docs: Transcript of the message. - role: - type: Role - docs: Role of who is providing the message. - tool_call: - type: optional - docs: Function call name and arguments. - tool_result: - type: optional - docs: Function call response from client. - source: - openapi: evi-asyncapi.json - ChatMetadata: - docs: When provided, the output is a chat metadata message. - properties: - chat_group_id: - type: string - docs: >- - ID of the Chat Group. - - - Used to resume a Chat when passed in the - [resumed_chat_group_id](/reference/speech-to-speech-evi/chat#request.query.resumed_chat_group_id) - query parameter of a subsequent connection request. This allows EVI to - continue the conversation from where it left off within the Chat - Group. - - - Learn more about [supporting chat - resumability](/docs/speech-to-speech-evi/faq#does-evi-support-chat-resumability) - from the EVI FAQ. - chat_id: - type: string - docs: >- - ID of the Chat session. Allows the Chat session to be tracked and - referenced. - custom_session_id: - type: optional - docs: >- - Used to manage conversational state, correlate frontend and backend - data, and persist conversations across EVI sessions. - request_id: - type: optional - docs: ID of the initiating request. - type: - type: literal<"chat_metadata"> - docs: >- - The type of message sent through the socket; for a Chat Metadata - message, this must be `chat_metadata`. - - - The Chat Metadata message is the first message you receive after - establishing a connection with EVI and contains important identifiers - for the current Chat session. - source: - openapi: evi-asyncapi.json - Context: - properties: - text: - type: string - docs: >- - The context to be injected into the conversation. Helps inform the - LLM's response by providing relevant information about the ongoing - conversation. - - - This text will be appended to the end of - [user_messages](/reference/speech-to-speech-evi/chat#receive.UserMessage.message.content) - based on the chosen persistence level. For example, if you want to - remind EVI of its role as a helpful weather assistant, the context you - insert will be appended to the end of user messages as `{Context: You - are a helpful weather assistant}`. - type: - type: optional - docs: >- - The persistence level of the injected context. Specifies how long the - injected context will remain active in the session. - - - - **Temporary**: Context that is only applied to the following - assistant response. - - - - **Persistent**: Context that is applied to all subsequent assistant - responses for the remainder of the Chat. 
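A minimal sketch of the two persistence levels of the Context object just defined, assuming it is later attached to a `session_settings` message; the example texts are placeholders:

    # "temporary" applies only to the next assistant response;
    # "persistent" applies to all later responses in the Chat.
    temporary_context = {
        "text": "You are a helpful weather assistant",
        "type": "temporary",
    }
    persistent_context = {
        "text": "The user's name is Ada",  # hypothetical detail to keep in context
        "type": "persistent",
    }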
- source: - openapi: evi-openapi.json - ContextType: - enum: - - persistent - - temporary - source: - openapi: evi-openapi.json - EmotionScores: - properties: - Admiration: double - Adoration: double - Aesthetic Appreciation: double - Amusement: double - Anger: double - Anxiety: double - Awe: double - Awkwardness: double - Boredom: double - Calmness: double - Concentration: double - Confusion: double - Contemplation: double - Contempt: double - Contentment: double - Craving: double - Desire: double - Determination: double - Disappointment: double - Disgust: double - Distress: double - Doubt: double - Ecstasy: double - Embarrassment: double - Empathic Pain: double - Entrancement: double - Envy: double - Excitement: double - Fear: double - Guilt: double - Horror: double - Interest: double - Joy: double - Love: double - Nostalgia: double - Pain: double - Pride: double - Realization: double - Relief: double - Romance: double - Sadness: double - Satisfaction: double - Shame: double - Surprise (negative): double - Surprise (positive): double - Sympathy: double - Tiredness: double - Triumph: double - source: - openapi: evi-openapi.json - Encoding: - type: literal<"linear16"> - WebSocketError: - docs: When provided, the output is an error message. - properties: - code: - type: string - docs: Error code. Identifies the type of error encountered. - custom_session_id: - type: optional - docs: >- - Used to manage conversational state, correlate frontend and backend - data, and persist conversations across EVI sessions. - message: - type: string - docs: Detailed description of the error. - request_id: - type: optional - docs: ID of the initiating request. - slug: - type: string - docs: >- - Short, human-readable identifier and description for the error. See a - complete list of error slugs on the [Errors - page](/docs/resources/errors). - type: - type: literal<"error"> - docs: >- - The type of message sent through the socket; for a Web Socket Error - message, this must be `error`. - - - This message indicates a disruption in the WebSocket connection, such - as an unexpected disconnection, protocol error, or data transmission - issue. - source: - openapi: evi-asyncapi.json - ErrorLevel: - type: literal<"warn"> - Inference: - properties: - prosody: - type: optional - docs: >- - Prosody model inference results. - - - EVI uses the prosody model to measure 48 emotions related to speech - and vocal characteristics within a given expression. - source: - openapi: evi-openapi.json - MillisecondInterval: - properties: - begin: - type: integer - docs: Start time of the interval in milliseconds. - end: - type: integer - docs: End time of the interval in milliseconds. - source: - openapi: evi-openapi.json - PauseAssistantMessage: - docs: >- - Pause responses from EVI. Chat history is still saved and sent after - resuming. - properties: - custom_session_id: - type: optional - docs: >- - Used to manage conversational state, correlate frontend and backend - data, and persist conversations across EVI sessions. - type: - type: literal<"pause_assistant_message"> - docs: >- - The type of message sent through the socket; must be - `pause_assistant_message` for our server to correctly identify and - process it as a Pause Assistant message. - - - Once this message is sent, EVI will not respond until a [Resume - Assistant - message](/reference/speech-to-speech-evi/chat#send.ResumeAssistantMessage) - is sent. When paused, EVI won't respond, but transcriptions of your - audio inputs will still be recorded. 
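A sketch of pausing and resuming EVI responses using the PauseAssistantMessage above and the ResumeAssistantMessage defined just below, assuming `send` stands in for the send method of an already-open EVI WebSocket:

    import json

    async def pause_then_resume(send) -> None:
        # While paused, EVI does not respond but user audio is still transcribed.
        await send(json.dumps({"type": "pause_assistant_message"}))
        # ... later, resume; EVI replies only to the last user message sent while paused ...
        await send(json.dumps({"type": "resume_assistant_message"}))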
- source: - openapi: evi-openapi.json - ProsodyInference: - properties: - scores: - type: EmotionScores - docs: >- - The confidence scores for 48 emotions within the detected expression - of an audio sample. - - - Scores typically range from 0 to 1, with higher values indicating a - stronger confidence level in the measured attribute. - - - See our guide on [interpreting expression measurement - results](/docs/expression-measurement/faq#how-do-i-interpret-my-results) - to learn more. - source: - openapi: evi-openapi.json - ResumeAssistantMessage: - docs: >- - Resume responses from EVI. Chat history sent while paused will now be - sent. - properties: - custom_session_id: - type: optional - docs: >- - Used to manage conversational state, correlate frontend and backend - data, and persist conversations across EVI sessions. - type: - type: literal<"resume_assistant_message"> - docs: >- - The type of message sent through the socket; must be - `resume_assistant_message` for our server to correctly identify and - process it as a Resume Assistant message. - - - Upon resuming, if any audio input was sent during the pause, EVI will - retain context from all messages sent but only respond to the last - user message. (e.g., If you ask EVI two questions while paused and - then send a `resume_assistant_message`, EVI will respond to the second - question and have added the first question to its conversation - context.) - source: - openapi: evi-openapi.json - Role: - enum: - - assistant - - system - - user - - all - - tool - - context - source: - openapi: evi-openapi.json - SessionSettingsVariablesValue: - discriminated: false - union: - - string - - double - - boolean - source: - openapi: evi-openapi.json - inline: true - SessionSettings: - docs: Settings for this chat session. - properties: - audio: - type: optional - docs: >- - Configuration details for the audio input used during the session. - Ensures the audio is being correctly set up for processing. - - - This optional field is only required when the audio input is encoded - in PCM Linear 16 (16-bit, little-endian, signed PCM WAV data). For - detailed instructions on how to configure session settings for PCM - Linear 16 audio, please refer to the [Session Settings - guide](/docs/speech-to-speech-evi/configuration/session-settings). - builtin_tools: - type: optional> - docs: >- - List of built-in tools to enable for the session. - - - Tools are resources used by EVI to perform various tasks, such as - searching the web or calling external APIs. Built-in tools, like web - search, are natively integrated, while user-defined tools are created - and invoked by the user. To learn more, see our [Tool Use - Guide](/docs/speech-to-speech-evi/features/tool-use). - - - Currently, the only built-in tool Hume provides is **Web Search**. - When enabled, Web Search equips EVI with the ability to search the web - for up-to-date information. - context: - type: optional - docs: >- - Field for injecting additional context into the conversation, which is - appended to the end of user messages for the session. - - - When included in a Session Settings message, the provided context can - be used to remind the LLM of its role in every user message, prevent - it from forgetting important details, or add new relevant information - to the conversation. - - - Set to `null` to clear injected context. - custom_session_id: - type: optional - docs: >- - Unique identifier for the session. 
Used to manage conversational - state, correlate frontend and backend data, and persist conversations - across EVI sessions. - - - If included, the response sent from Hume to your backend will include - this ID. This allows you to correlate frontend users with their - incoming messages. - - - It is recommended to pass a `custom_session_id` if you are using a - Custom Language Model. Please see our guide to [using a custom - language - model](/docs/speech-to-speech-evi/guides/custom-language-model) with - EVI to learn more. - language_model_api_key: - type: optional - docs: >- - Third party API key for the supplemental language model. - - - When provided, EVI will use this key instead of Hume's API key for the - supplemental LLM. This allows you to bypass rate limits and utilize - your own API key as needed. - metadata: optional> - system_prompt: - type: optional - docs: >- - Instructions used to shape EVI's behavior, responses, and style for - the session. - - - When included in a Session Settings message, the provided Prompt - overrides the existing one specified in the EVI configuration. If no - Prompt was defined in the configuration, this Prompt will be the one - used for the session. - - - You can use the Prompt to define a specific goal or role for EVI, - specifying how it should act or what it should focus on during the - conversation. For example, EVI can be instructed to act as a customer - support representative, a fitness coach, or a travel advisor, each - with its own set of behaviors and response styles. - - - For help writing a system prompt, see our [Prompting - Guide](/docs/speech-to-speech-evi/guides/prompting). - tools: - type: optional> - docs: >- - List of user-defined tools to enable for the session. - - - Tools are resources used by EVI to perform various tasks, such as - searching the web or calling external APIs. Built-in tools, like web - search, are natively integrated, while user-defined tools are created - and invoked by the user. To learn more, see our [Tool Use - Guide](/docs/speech-to-speech-evi/features/tool-use). - type: - type: literal<"session_settings"> - docs: >- - The type of message sent through the socket; must be - `session_settings` for our server to correctly identify and process it - as a Session Settings message. - - - Session settings are temporary and apply only to the current Chat - session. These settings can be adjusted dynamically based on the - requirements of each session to ensure optimal performance and user - experience. - - - For more information, please refer to the [Session Settings - guide](/docs/speech-to-speech-evi/configuration/session-settings). - variables: - type: optional> - docs: >- - This field allows you to assign values to dynamic variables referenced - in your system prompt. - - - Each key represents the variable name, and the corresponding value is - the specific content you wish to assign to that variable within the - session. While the values for variables can be strings, numbers, or - booleans, the value will ultimately be converted to a string when - injected into your system prompt. - - - Using this field, you can personalize responses based on - session-specific details. For more guidance, see our [guide on using - dynamic - variables](/docs/speech-to-speech-evi/features/dynamic-variables). - voice_id: - type: optional - docs: >- - Allows you to change the voice during an active chat. Updating the - voice does not affect chat context or conversation history. 
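A sketch of a complete `session_settings` payload assembled from the SessionSettings schema above, assuming linear16 PCM audio input; the sample rate, prompt text, and variable values are illustrative only:

    import json

    session_settings = {
        "type": "session_settings",
        # Required only when streaming PCM Linear 16 audio.
        "audio": {"encoding": "linear16", "sample_rate": 16000, "channels": 1},
        # Overrides the Prompt from the EVI configuration for this session.
        "system_prompt": "You are a concise, friendly voice assistant.",
        # Values for dynamic variables referenced in the system prompt.
        "variables": {"username": "Ada"},
    }
    payload = json.dumps(session_settings)

The payload would be sent once after the WebSocket handshake; the settings are temporary and apply only to the current Chat session.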
- source: - openapi: evi-openapi.json - Tool: - properties: - description: - type: optional - docs: >- - An optional description of what the tool does, used by the - supplemental LLM to choose when and how to call the function. - fallback_content: - type: optional - docs: >- - Optional text passed to the supplemental LLM if the tool call fails. - The LLM then uses this text to generate a response back to the user, - ensuring continuity in the conversation. - name: - type: string - docs: Name of the user-defined tool to be enabled. - parameters: - type: string - docs: >- - Parameters of the tool. Is a stringified JSON schema. - - - These parameters define the inputs needed for the tool's execution, - including the expected data type and description for each input field. - Structured as a JSON schema, this format ensures the tool receives - data in the expected format. - type: - type: ToolType - docs: Type of tool. Set to `function` for user-defined tools. - source: - openapi: evi-openapi.json - ToolCallMessage: - docs: When provided, the output is a tool call. - properties: - custom_session_id: - type: optional - docs: >- - Used to manage conversational state, correlate frontend and backend - data, and persist conversations across EVI sessions. - name: - type: string - docs: Name of the tool called. - parameters: - type: string - docs: Parameters of the tool call. Is a stringified JSON schema. - response_required: - type: boolean - docs: >- - Indicates whether a response to the tool call is required from the - developer, either in the form of a [Tool Response - message](/reference/empathic-voice-interface-evi/chat/chat#send.Tool%20Response%20Message.type) - or a [Tool Error - message](/reference/empathic-voice-interface-evi/chat/chat#send.Tool%20Error%20Message.type). - tool_call_id: - type: string - docs: >- - The unique identifier for a specific tool call instance. - - - This ID is used to track the request and response of a particular tool - invocation, ensuring that the correct response is linked to the - appropriate request. - tool_type: - type: ToolType - docs: >- - Type of tool called. Either `builtin` for natively implemented tools, - like web search, or `function` for user-defined tools. - type: - type: optional> - docs: >- - The type of message sent through the socket; for a Tool Call message, - this must be `tool_call`. - - - This message indicates that the supplemental LLM has detected a need - to invoke the specified tool. - source: - openapi: evi-openapi.json - ToolErrorMessage: - docs: When provided, the output is a function call error. - properties: - code: - type: optional - docs: Error code. Identifies the type of error encountered. - content: - type: optional - docs: >- - Optional text passed to the supplemental LLM in place of the tool call - result. The LLM then uses this text to generate a response back to the - user, ensuring continuity in the conversation if the tool errors. - custom_session_id: - type: optional - docs: >- - Used to manage conversational state, correlate frontend and backend - data, and persist conversations across EVI sessions. - error: - type: string - docs: Error message from the tool call, not exposed to the LLM or user. - level: - type: optional - docs: >- - Indicates the severity of an error; for a Tool Error message, this - must be `warn` to signal an unexpected event. - tool_call_id: - type: string - docs: >- - The unique identifier for a specific tool call instance. 
- - - This ID is used to track the request and response of a particular tool - invocation, ensuring that the Tool Error message is linked to the - appropriate tool call request. The specified `tool_call_id` must match - the one received in the [Tool Call - message](/reference/speech-to-speech-evi/chat#receive.ToolCallMessage). - tool_type: - type: optional - docs: >- - Type of tool called. Either `builtin` for natively implemented tools, - like web search, or `function` for user-defined tools. - type: - type: literal<"tool_error"> - docs: >- - The type of message sent through the socket; for a Tool Error message, - this must be `tool_error`. - - - Upon receiving a [Tool Call - message](/reference/speech-to-speech-evi/chat#receive.ToolCallMessage) - and failing to invoke the function, this message is sent to notify EVI - of the tool's failure. - source: - openapi: evi-openapi.json - ToolResponseMessage: - docs: When provided, the output is a function call response. - properties: - content: - type: string - docs: >- - Return value of the tool call. Contains the output generated by the - tool to pass back to EVI. - custom_session_id: - type: optional - docs: >- - Used to manage conversational state, correlate frontend and backend - data, and persist conversations across EVI sessions. - tool_call_id: - type: string - docs: >- - The unique identifier for a specific tool call instance. - - - This ID is used to track the request and response of a particular tool - invocation, ensuring that the correct response is linked to the - appropriate request. The specified `tool_call_id` must match the one - received in the [Tool Call - message](/reference/speech-to-speech-evi/chat#receive.ToolCallMessage.tool_call_id). - tool_name: - type: optional - docs: >- - Name of the tool. - - - Include this optional field to help the supplemental LLM identify - which tool generated the response. The specified `tool_name` must - match the one received in the [Tool Call - message](/reference/speech-to-speech-evi/chat#receive.ToolCallMessage). - tool_type: - type: optional - docs: >- - Type of tool called. Either `builtin` for natively implemented tools, - like web search, or `function` for user-defined tools. - type: - type: literal<"tool_response"> - docs: >- - The type of message sent through the socket; for a Tool Response - message, this must be `tool_response`. - - - Upon receiving a [Tool Call - message](/reference/speech-to-speech-evi/chat#receive.ToolCallMessage) - and successfully invoking the function, this message is sent to convey - the result of the function call back to EVI. - source: - openapi: evi-openapi.json - ToolType: - enum: - - builtin - - function - source: - openapi: evi-openapi.json - UserInput: - docs: >- - User text to insert into the conversation. Text sent through a User Input - message is treated as the user's speech to EVI. EVI processes this input - and provides a corresponding response. - - - Expression measurement results are not available for User Input messages, - as the prosody model relies on audio input and cannot process text alone. - properties: - custom_session_id: - type: optional - docs: >- - Used to manage conversational state, correlate frontend and backend - data, and persist conversations across EVI sessions. - text: - type: string - docs: >- - User text to insert into the conversation. Text sent through a User - Input message is treated as the user's speech to EVI. EVI processes - this input and provides a corresponding response. 
- - - Expression measurement results are not available for User Input - messages, as the prosody model relies on audio input and cannot - process text alone. - type: - type: literal<"user_input"> - docs: >- - The type of message sent through the socket; must be `user_input` for - our server to correctly identify and process it as a User Input - message. - source: - openapi: evi-openapi.json - UserInterruption: - docs: When provided, the output is an interruption. - properties: - custom_session_id: - type: optional - docs: >- - Used to manage conversational state, correlate frontend and backend - data, and persist conversations across EVI sessions. - time: - type: integer - docs: Unix timestamp of the detected user interruption. - type: - type: literal<"user_interruption"> - docs: >- - The type of message sent through the socket; for a User Interruption - message, this must be `user_interruption`. - - - This message indicates the user has interrupted the assistant's - response. EVI detects the interruption in real-time and sends this - message to signal the interruption event. This message allows the - system to stop the current audio playback, clear the audio queue, and - prepare to handle new user input. - source: - openapi: evi-asyncapi.json - UserMessage: - docs: When provided, the output is a user message. - properties: - custom_session_id: - type: optional - docs: >- - Used to manage conversational state, correlate frontend and backend - data, and persist conversations across EVI sessions. - from_text: - type: boolean - docs: >- - Indicates if this message was inserted into the conversation as text - from a [User - Input](/reference/speech-to-speech-evi/chat#send.UserInput.text) - message. - interim: - type: boolean - docs: >- - Indicates whether this `UserMessage` contains an interim (unfinalized) - transcript. - - - - `true`: the transcript is provisional; words may be repeated or - refined in subsequent `UserMessage` responses as additional audio is - processed. - - - `false`: the transcript is final and complete. - - - Interim transcripts are only sent when the - [`verbose_transcription`](/reference/speech-to-speech-evi/chat#request.query.verbose_transcription) - query parameter is set to `true` in the initial handshake. - language: - type: optional - docs: Detected language of the message text. - message: - type: ChatMessage - docs: Transcript of the message. - models: - type: Inference - docs: Inference model results. - time: - type: MillisecondInterval - docs: Start and End time of user message. - type: - type: literal<"user_message"> - docs: >- - The type of message sent through the socket; for a User Message, this - must be `user_message`. - - - This message contains both a transcript of the user's input and the - expression measurement predictions if the input was sent as an [Audio - Input message](/reference/speech-to-speech-evi/chat#send.AudioInput). - Expression measurement predictions are not provided for a [User Input - message](/reference/speech-to-speech-evi/chat#send.UserInput), as the - prosody model relies on audio input and cannot process text alone. - source: - openapi: evi-asyncapi.json - SubscribeEvent: - discriminated: false - union: - - type: AssistantEnd - docs: When provided, the output is an assistant end message. - - type: AssistantMessage - docs: When provided, the output is an assistant message. - - type: AssistantProsody - docs: When provided, the output is an Assistant Prosody message. 
- - type: AudioOutput - docs: >- - The type of message sent through the socket; for an Audio Output - message, this must be `audio_output`. - - type: ChatMetadata - docs: When provided, the output is a chat metadata message. - - type: WebSocketError - docs: When provided, the output is an error message. - - type: UserInterruption - docs: When provided, the output is an interruption. - - type: UserMessage - docs: When provided, the output is a user message. - - type: ToolCallMessage - docs: When provided, the output is a tool call. - - type: ToolResponseMessage - docs: When provided, the output is a function call response. - - type: ToolErrorMessage - docs: When provided, the output is a function call error. - source: - openapi: evi-asyncapi.json - JsonMessage: - discriminated: false - union: - - type: AssistantEnd - docs: When provided, the output is an assistant end message. - - type: AssistantMessage - docs: When provided, the output is an assistant message. - - type: AssistantProsody - docs: When provided, the output is an Assistant Prosody message. - - type: ChatMetadata - docs: When provided, the output is a chat metadata message. - - type: WebSocketError - docs: When provided, the output is an error message. - - type: UserInterruption - docs: When provided, the output is an interruption. - - type: UserMessage - docs: When provided, the output is a user message. - - type: ToolCallMessage - docs: When provided, the output is a tool call. - - type: ToolResponseMessage - docs: When provided, the output is a function call response. - - type: ToolErrorMessage - docs: When provided, the output is a function call error. - source: - openapi: evi-asyncapi.json - ConnectSessionSettingsAudio: - docs: >- - Configuration details for the audio input used during the session. Ensures - the audio is being correctly set up for processing. - - - This optional field is only required when the audio input is encoded in - PCM Linear 16 (16-bit, little-endian, signed PCM WAV data). For detailed - instructions on how to configure session settings for PCM Linear 16 audio, - please refer to the [Session Settings - section](/docs/empathic-voice-interface-evi/configuration#session-settings) - on the EVI Configuration page. - properties: - channels: - type: optional - docs: Sets number of audio channels for audio input. - encoding: - type: optional - docs: Sets encoding format of the audio input, such as `linear16`. - sample_rate: - type: optional - docs: >- - Sets the sample rate for audio input. (Number of samples per second in - the audio input, measured in Hertz.) - source: - openapi: evi-asyncapi.json - inline: true - ConnectSessionSettingsContext: - docs: >- - Allows developers to inject additional context into the conversation, - which is appended to the end of user messages for the session. - - - When included in a Session Settings message, the provided context can be - used to remind the LLM of its role in every user message, prevent it from - forgetting important details, or add new relevant information to the - conversation. - - - Set to `null` to disable context injection. - properties: - text: - type: optional - docs: >- - The context to be injected into the conversation. Helps inform the - LLM's response by providing relevant information about the ongoing - conversation. - - - This text will be appended to the end of - [user_messages](/reference/speech-to-speech-evi/chat#receive.UserMessage.message.content) - based on the chosen persistence level. 
For example, if you want to - remind EVI of its role as a helpful weather assistant, the context you - insert will be appended to the end of user messages as `{Context: You - are a helpful weather assistant}`. - type: - type: optional - docs: >- - The persistence level of the injected context. Specifies how long the - injected context will remain active in the session. - - - - **Temporary**: Context that is only applied to the following - assistant response. - - - - **Persistent**: Context that is applied to all subsequent assistant - responses for the remainder of the Chat. - source: - openapi: evi-asyncapi.json - inline: true - ConnectSessionSettingsVariablesValue: - discriminated: false - union: - - string - - double - - boolean - source: - openapi: evi-asyncapi.json - inline: true - ConnectSessionSettings: - properties: - audio: - type: optional - docs: >- - Configuration details for the audio input used during the session. - Ensures the audio is being correctly set up for processing. - - - This optional field is only required when the audio input is encoded - in PCM Linear 16 (16-bit, little-endian, signed PCM WAV data). For - detailed instructions on how to configure session settings for PCM - Linear 16 audio, please refer to the [Session Settings - section](/docs/empathic-voice-interface-evi/configuration#session-settings) - on the EVI Configuration page. - context: - type: optional - docs: >- - Allows developers to inject additional context into the conversation, - which is appended to the end of user messages for the session. - - - When included in a Session Settings message, the provided context can - be used to remind the LLM of its role in every user message, prevent - it from forgetting important details, or add new relevant information - to the conversation. - - - Set to `null` to disable context injection. - custom_session_id: - type: optional - docs: >- - Used to manage conversational state, correlate frontend and backend - data, and persist conversations across EVI sessions. - event_limit: - type: optional - docs: >- - The maximum number of chat events to return from chat history. By - default, the system returns up to 300 events (100 events per page × 3 - pages). Set this parameter to a smaller value to limit the number of - events returned. - language_model_api_key: - type: optional - docs: >- - Third party API key for the supplemental language model. - - - When provided, EVI will use this key instead of Hume's API key for the - supplemental LLM. This allows you to bypass rate limits and utilize - your own API key as needed. - system_prompt: - type: optional - docs: >- - Instructions used to shape EVI's behavior, responses, and style for - the session. - - - When included in a Session Settings message, the provided Prompt - overrides the existing one specified in the EVI configuration. If no - Prompt was defined in the configuration, this Prompt will be the one - used for the session. - - - You can use the Prompt to define a specific goal or role for EVI, - specifying how it should act or what it should focus on during the - conversation. For example, EVI can be instructed to act as a customer - support representative, a fitness coach, or a travel advisor, each - with its own set of behaviors and response styles. - - - For help writing a system prompt, see our [Prompting - Guide](/docs/speech-to-speech-evi/guides/prompting). - variables: - type: optional> - docs: >- - This field allows you to assign values to dynamic variables referenced - in your system prompt. 
- - - Each key represents the variable name, and the corresponding value is - the specific content you wish to assign to that variable within the - session. While the values for variables can be strings, numbers, or - booleans, the value will ultimately be converted to a string when - injected into your system prompt. - - - Using this field, you can personalize responses based on - session-specific details. For more guidance, see our [guide on using - dynamic - variables](/docs/speech-to-speech-evi/features/dynamic-variables). - voice_id: - type: optional - docs: >- - The name or ID of the voice from the `Voice Library` to be used as the - speaker for this EVI session. This will override the speaker set in - the selected configuration. - source: - openapi: evi-asyncapi.json - ControlPlanePublishEvent: - discriminated: false - union: - - type: SessionSettings - docs: Settings for this chat session. - - type: UserInput - docs: >- - User text to insert into the conversation. Text sent through a User - Input message is treated as the user's speech to EVI. EVI processes - this input and provides a corresponding response. - - - Expression measurement results are not available for User Input - messages, as the prosody model relies on audio input and cannot - process text alone. - - type: AssistantInput - docs: When provided, the input is spoken by EVI. - - type: ToolResponseMessage - docs: When provided, the output is a function call response. - - type: ToolErrorMessage - docs: When provided, the output is a function call error. - - type: PauseAssistantMessage - docs: >- - Pause responses from EVI. Chat history is still saved and sent after - resuming. - - type: ResumeAssistantMessage - docs: >- - Resume responses from EVI. Chat history sent while paused will now be - sent. 
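A sketch of answering a received `tool_call` event with the ToolResponseMessage and ToolErrorMessage shapes included in this publish-event union, assuming `tool_call` is the parsed JSON of the incoming message; the tool result is a placeholder:

    import json

    def handle_tool_call(tool_call: dict) -> str:
        try:
            result = '{"temperature_f": 68}'  # placeholder for a real tool invocation
            return json.dumps({
                "type": "tool_response",
                "tool_call_id": tool_call["tool_call_id"],  # must match the Tool Call message
                "content": result,
            })
        except Exception as exc:
            return json.dumps({
                "type": "tool_error",
                "tool_call_id": tool_call["tool_call_id"],
                "error": str(exc),  # not exposed to the LLM or user
                "content": "The tool failed; apologize and offer to retry.",
            })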
- source: - openapi: evi-openapi.json - ErrorResponse: - properties: - code: optional - error: optional - message: optional - source: - openapi: evi-openapi.json - HTTPValidationError: - properties: - detail: - type: optional> - source: - openapi: evi-openapi.json - LanguageModelType: - enum: - - value: claude-3-7-sonnet-latest - name: Claude37SonnetLatest - - value: claude-3-5-sonnet-latest - name: Claude35SonnetLatest - - value: claude-3-5-haiku-latest - name: Claude35HaikuLatest - - value: claude-3-5-sonnet-20240620 - name: Claude35Sonnet20240620 - - value: claude-3-opus-20240229 - name: Claude3Opus20240229 - - value: claude-3-sonnet-20240229 - name: Claude3Sonnet20240229 - - value: claude-3-haiku-20240307 - name: Claude3Haiku20240307 - - value: claude-sonnet-4-20250514 - name: ClaudeSonnet420250514 - - value: claude-sonnet-4-5-20250929 - name: ClaudeSonnet4520250929 - - value: claude-haiku-4-5-20251001 - name: ClaudeHaiku4520251001 - - value: us.anthropic.claude-3-5-haiku-20241022-v1:0 - name: UsAnthropicClaude35Haiku20241022V10 - - value: us.anthropic.claude-3-5-sonnet-20240620-v1:0 - name: UsAnthropicClaude35Sonnet20240620V10 - - value: us.anthropic.claude-3-haiku-20240307-v1:0 - name: UsAnthropicClaude3Haiku20240307V10 - - value: gpt-oss-120b - name: GptOss120B - - value: qwen-3-235b-a22b - name: Qwen3235BA22B - - value: qwen-3-235b-a22b-instruct-2507 - name: Qwen3235BA22BInstruct2507 - - value: qwen-3-235b-a22b-thinking-2507 - name: Qwen3235BA22BThinking2507 - - value: gemini-1.5-pro - name: Gemini15Pro - - value: gemini-1.5-flash - name: Gemini15Flash - - value: gemini-1.5-pro-002 - name: Gemini15Pro002 - - value: gemini-1.5-flash-002 - name: Gemini15Flash002 - - value: gemini-2.0-flash - name: Gemini20Flash - - value: gemini-2.5-flash - name: Gemini25Flash - - value: gemini-2.5-flash-preview-04-17 - name: Gemini25FlashPreview0417 - - value: gpt-4-turbo - name: Gpt4Turbo - - value: gpt-4-turbo-preview - name: Gpt4TurboPreview - - value: gpt-3.5-turbo-0125 - name: Gpt35Turbo0125 - - value: gpt-3.5-turbo - name: Gpt35Turbo - - value: gpt-4o - name: Gpt4O - - value: gpt-4o-mini - name: Gpt4OMini - - value: gpt-4.1 - name: Gpt41 - - value: gpt-5 - name: Gpt5 - - value: gpt-5-mini - name: Gpt5Mini - - value: gpt-5-nano - name: Gpt5Nano - - value: gpt-4o-priority - name: Gpt4OPriority - - value: gpt-4o-mini-priority - name: Gpt4OMiniPriority - - value: gpt-4.1-priority - name: Gpt41Priority - - value: gpt-5-priority - name: Gpt5Priority - - value: gpt-5-mini-priority - name: Gpt5MiniPriority - - value: gpt-5-nano-priority - name: Gpt5NanoPriority - - value: gemma-7b-it - name: Gemma7BIt - - value: llama3-8b-8192 - name: Llama38B8192 - - value: llama3-70b-8192 - name: Llama370B8192 - - value: llama-3.1-70b-versatile - name: Llama3170BVersatile - - value: llama-3.3-70b-versatile - name: Llama3370BVersatile - - value: llama-3.1-8b-instant - name: Llama318BInstant - - value: moonshotai/kimi-k2-instruct - name: MoonshotaiKimiK2Instruct - - value: accounts/fireworks/models/mixtral-8x7b-instruct - name: AccountsFireworksModelsMixtral8X7BInstruct - - value: accounts/fireworks/models/llama-v3p1-405b-instruct - name: AccountsFireworksModelsLlamaV3P1405BInstruct - - value: accounts/fireworks/models/llama-v3p1-70b-instruct - name: AccountsFireworksModelsLlamaV3P170BInstruct - - value: accounts/fireworks/models/llama-v3p1-8b-instruct - name: AccountsFireworksModelsLlamaV3P18BInstruct - - sonar - - value: sonar-pro - name: SonarPro - - sambanova - - value: DeepSeek-R1-Distill-Llama-70B - name: 
DeepSeekR1DistillLlama70B - - value: Llama-4-Maverick-17B-128E-Instruct - name: Llama4Maverick17B128EInstruct - - value: Qwen3-32B - name: Qwen332B - - value: grok-4-fast-non-reasoning-latest - name: Grok4FastNonReasoningLatest - - ellm - - value: custom-language-model - name: CustomLanguageModel - - value: hume-evi-3-web-search - name: HumeEvi3WebSearch - source: - openapi: evi-openapi.json - ModelProviderEnum: - enum: - - GROQ - - OPEN_AI - - FIREWORKS - - ANTHROPIC - - CUSTOM_LANGUAGE_MODEL - - GOOGLE - - HUME_AI - - AMAZON_BEDROCK - - PERPLEXITY - - SAMBANOVA - - CEREBRAS - source: - openapi: evi-openapi.json - ReturnChatAudioReconstructionStatus: - enum: - - QUEUED - - IN_PROGRESS - - COMPLETE - - ERROR - - CANCELLED - docs: >- - Indicates the current state of the audio reconstruction job. There are - five possible statuses: - - - - `QUEUED`: The reconstruction job is waiting to be processed. - - - - `IN_PROGRESS`: The reconstruction is currently being processed. - - - - `COMPLETE`: The audio reconstruction is finished and ready for download. - - - - `ERROR`: An error occurred during the reconstruction process. - - - - `CANCELED`: The reconstruction job has been canceled. - inline: true - source: - openapi: evi-openapi.json - ReturnChatAudioReconstruction: - docs: >- - List of chat audio reconstructions returned for the specified page number - and page size. - properties: - filename: - type: optional - docs: Name of the chat audio reconstruction file. - id: - type: string - docs: Identifier for the chat. Formatted as a UUID. - modified_at: - type: optional - docs: >- - The timestamp of the most recent status change for this audio - reconstruction, formatted milliseconds since the Unix epoch. - signed_audio_url: - type: optional - docs: Signed URL used to download the chat audio reconstruction file. - signed_url_expiration_timestamp_millis: - type: optional - docs: >- - The timestamp when the signed URL will expire, formatted as a Unix - epoch milliseconds. - status: - type: ReturnChatAudioReconstructionStatus - docs: >- - Indicates the current state of the audio reconstruction job. There are - five possible statuses: - - - - `QUEUED`: The reconstruction job is waiting to be processed. - - - - `IN_PROGRESS`: The reconstruction is currently being processed. - - - - `COMPLETE`: The audio reconstruction is finished and ready for - download. - - - - `ERROR`: An error occurred during the reconstruction process. - - - - `CANCELED`: The reconstruction job has been canceled. - user_id: - type: string - docs: Identifier for the user that owns this chat. Formatted as a UUID. - source: - openapi: evi-openapi.json - ValidationErrorLocItem: - discriminated: false - union: - - string - - integer - source: - openapi: evi-openapi.json - inline: true - ValidationError: - properties: - loc: - type: list - msg: string - type: string - source: - openapi: evi-openapi.json - WebhookEventBase: - docs: Represents the fields common to all webhook events. - properties: - chat_group_id: - type: string - docs: Unique ID of the **Chat Group** associated with the **Chat** session. - chat_id: - type: string - docs: Unique ID of the **Chat** session. - config_id: - type: optional - docs: Unique ID of the EVI **Config** used for the session. 
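A sketch of a webhook handler dispatching on the event types that extend WebhookEventBase (defined immediately below), assuming `payload` is the parsed JSON body of the POST request sent to the configured webhook URL:

    def handle_webhook(payload: dict) -> None:
        chat_id = payload["chat_id"]           # common WebhookEventBase field
        event_name = payload.get("event_name")
        if event_name == "chat_started":
            print(f"chat {chat_id} started at {payload['start_time']}")
        elif event_name == "chat_ended":
            print(f"chat {chat_id} ended: {payload['end_reason']}")
        elif event_name == "tool_call":
            print(f"chat {chat_id} requested tool {payload['tool_call_message']['name']}")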
- source: - openapi: evi-openapi.json - WebhookEvent: - discriminated: false - union: - - WebhookEventChatStarted - - WebhookEventChatEnded - - WebhookEventToolCall - source: - openapi: evi-openapi.json - WebhookEventChatEnded: - properties: - caller_number: - type: optional - docs: >- - Phone number of the caller in E.164 format (e.g., `+12223333333`). - This field is included only if the Chat was created via the [Twilio - phone calling](/docs/empathic-voice-interface-evi/phone-calling) - integration. - custom_session_id: - type: optional - docs: >- - User-defined session ID. Relevant only when employing a [custom - language - model](/docs/empathic-voice-interface-evi/custom-language-model) in - the EVI Config. - duration_seconds: - type: integer - docs: Total duration of the session in seconds. - end_reason: - type: WebhookEventChatStatus - docs: Reason for the session's termination. - end_time: - type: integer - docs: Unix timestamp (in milliseconds) indicating when the session ended. - event_name: - type: optional> - docs: Always `chat_ended`. - extends: - - WebhookEventBase - source: - openapi: evi-openapi.json - WebhookEventChatStartType: - enum: - - new_chat_group - - resumed_chat_group - source: - openapi: evi-openapi.json - WebhookEventChatStarted: - properties: - caller_number: - type: optional - docs: >- - Phone number of the caller in E.164 format (e.g., `+12223333333`). - This field is included only if the Chat was created via the [Twilio - phone calling](/docs/empathic-voice-interface-evi/phone-calling) - integration. - chat_start_type: - type: WebhookEventChatStartType - docs: >- - Indicates whether the chat is the first in a new Chat Group - (`new_chat_group`) or the continuation of an existing chat group - (`resumed_chat_group`). - custom_session_id: - type: optional - docs: >- - User-defined session ID. Relevant only when employing a [custom - language - model](/docs/empathic-voice-interface-evi/custom-language-model) in - the EVI Config. - event_name: - type: optional> - docs: Always `chat_started`. - start_time: - type: integer - docs: Unix timestamp (in milliseconds) indicating when the session started. - extends: - - WebhookEventBase - source: - openapi: evi-openapi.json - WebhookEventChatStatus: - enum: - - ACTIVE - - USER_ENDED - - USER_TIMEOUT - - INACTIVITY_TIMEOUT - - MAX_DURATION_TIMEOUT - - SILENCE_TIMEOUT - - ERROR - source: - openapi: evi-openapi.json - WebhookEventToolCall: - properties: - caller_number: - type: optional - docs: >- - Phone number of the caller in E.164 format (e.g., `+12223333333`). - This field is included only if the Chat was created via the [Twilio - phone calling](/docs/empathic-voice-interface-evi/phone-calling) - integration. - custom_session_id: - type: optional - docs: >- - User-defined session ID. Relevant only when employing a [custom - language - model](/docs/empathic-voice-interface-evi/custom-language-model) in - the EVI Config. - event_name: - type: optional> - docs: Always `tool_call`. - timestamp: - type: integer - docs: >- - Unix timestamp (in milliseconds) indicating when the tool call was - triggered. - tool_call_message: - type: ToolCallMessage - docs: The tool call. - extends: - - WebhookEventBase - source: - openapi: evi-openapi.json - PostedBuiltinToolName: - enum: - - web_search - - hang_up - docs: >- - Name of the built-in tool to use. Hume supports the following built-in - tools: - - - - **web_search:** enables EVI to search the web for up-to-date information - when applicable. 
- - - **hang_up:** closes the WebSocket connection when appropriate (e.g., - after detecting a farewell in the conversation). - - - For more information, see our guide on [using built-in - tools](/docs/speech-to-speech-evi/features/tool-use#using-built-in-tools). - inline: true - source: - openapi: evi-openapi.json - PostedBuiltinTool: - docs: A configuration of a built-in tool to be posted to the server - properties: - fallback_content: - type: optional - docs: >- - Optional text passed to the supplemental LLM in place of the tool call - result. The LLM then uses this text to generate a response back to the - user, ensuring continuity in the conversation if the Tool errors. - name: - type: PostedBuiltinToolName - docs: >- - Name of the built-in tool to use. Hume supports the following built-in - tools: - - - - **web_search:** enables EVI to search the web for up-to-date - information when applicable. - - - **hang_up:** closes the WebSocket connection when appropriate (e.g., - after detecting a farewell in the conversation). - - - For more information, see our guide on [using built-in - tools](/docs/speech-to-speech-evi/features/tool-use#using-built-in-tools). - source: - openapi: evi-openapi.json - PostedConfigPromptSpec: - docs: >- - Identifies which prompt to use in a a config OR how to create a new prompt - to use in the config - properties: - id: - type: optional - docs: Identifier for a Prompt. Formatted as a UUID. - text: - type: optional - docs: Text used to create a new prompt for a particular config. - version: - type: optional - docs: >- - Version number for a Prompt. Version numbers should be integers. The - combination of configId and version number is unique. - source: - openapi: evi-openapi.json - PostedEllmModel: - docs: A eLLM model configuration to be posted to the server - properties: - allow_short_responses: - type: optional - docs: |- - Boolean indicating if the eLLM is allowed to generate short responses. - - If omitted, short responses from the eLLM are enabled by default. - source: - openapi: evi-openapi.json - PostedEventMessageSpec: - docs: Settings for a specific event_message to be posted to the server - properties: - enabled: - type: boolean - docs: >- - Boolean indicating if this event message is enabled. - - - If set to `true`, a message will be sent when the circumstances for - the specific event are met. - text: - type: optional - docs: >- - Text to use as the event message when the corresponding event occurs. - If no text is specified, EVI will generate an appropriate message - based on its current context and the system prompt. - source: - openapi: evi-openapi.json - PostedEventMessageSpecs: - docs: >- - Collection of event messages returned by the server. - - - Event messages are sent by the server when specific events occur during a - chat session. These messages are used to configure behaviors for EVI, such - as controlling how EVI starts a new conversation. - properties: - on_inactivity_timeout: - type: optional - docs: >- - Specifies the message EVI provides when the chat is about to be - disconnected due to a user inactivity timeout, such as a message - mentioning a lack of user input for a period of time. - - - Enabling an inactivity message allows developers to use this message - event for "checking in" with the user if they are not responding to - see if they are still active. 
- - - If the user does not respond in the number of seconds specified in the - `inactivity_timeout` field, then EVI will say the message and the user - has 15 seconds to respond. If they respond in time, the conversation - will continue; if not, the conversation will end. - - - However, if the inactivity message is not enabled, then reaching the - inactivity timeout will immediately end the connection. - on_max_duration_timeout: - type: optional - docs: >- - Specifies the message EVI provides when the chat is disconnected due - to reaching the maximum chat duration, such as a message mentioning - the time limit for the chat has been reached. - on_new_chat: - type: optional - docs: >- - Specifies the initial message EVI provides when a new chat is started, - such as a greeting or welcome message. - source: - openapi: evi-openapi.json - PostedLanguageModel: - docs: A LanguageModel to be posted to the server - properties: - model_provider: - type: optional - docs: The provider of the supplemental language model. - model_resource: - type: optional - docs: String that specifies the language model to use with `model_provider`. - temperature: - type: optional - docs: >- - The model temperature, with values between 0 to 1 (inclusive). - - - Controls the randomness of the LLM's output, with values closer to 0 - yielding focused, deterministic responses and values closer to 1 - producing more creative, diverse responses. - source: - openapi: evi-openapi.json - PostedNudgeSpec: - docs: A nudge specification posted to the server - properties: - enabled: - type: optional - docs: >- - If true, EVI will 'nudge' the user to speak after a determined - interval of silence. - interval_secs: - type: optional - docs: The interval of inactivity (in seconds) before a nudge is triggered. - source: - openapi: evi-openapi.json - PostedTimeoutSpec: - docs: Settings for a specific timeout to be posted to the server - properties: - duration_secs: - type: optional - docs: Duration in seconds for the timeout. - enabled: - type: boolean - docs: Boolean indicating if this event message is enabled. - source: - openapi: evi-openapi.json - PostedTimeoutSpecsInactivity: - docs: >- - Specifies the duration of user inactivity (in seconds) after which the EVI - WebSocket connection will be automatically disconnected. Default is 600 - seconds (10 minutes). - - - Accepts a minimum value of 30 seconds and a maximum value of 1,800 - seconds. - properties: - duration_secs: - type: optional - docs: >- - Duration in seconds for the timeout (e.g. 600 seconds represents 10 - minutes). - enabled: - type: boolean - docs: >- - Boolean indicating if this timeout is enabled. - - - If set to false, EVI will not timeout due to a specified duration of - user inactivity being reached. However, the conversation will - eventually disconnect after 1,800 seconds (30 minutes), which is the - maximum WebSocket duration limit for EVI. - source: - openapi: evi-openapi.json - inline: true - PostedTimeoutSpecsMaxDuration: - docs: >- - Specifies the maximum allowed duration (in seconds) for an EVI WebSocket - connection before it is automatically disconnected. Default is 1,800 - seconds (30 minutes). - - - Accepts a minimum value of 30 seconds and a maximum value of 1,800 - seconds. - properties: - duration_secs: - type: optional - docs: >- - Duration in seconds for the timeout (e.g. 600 seconds represents 10 - minutes). - enabled: - type: boolean - docs: >- - Boolean indicating if this timeout is enabled. 
- - - If set to false, EVI will not timeout due to a specified maximum - duration being reached. However, the conversation will eventually - disconnect after 1,800 seconds (30 minutes), which is the maximum - WebSocket duration limit for EVI. - source: - openapi: evi-openapi.json - inline: true - PostedTimeoutSpecs: - docs: >- - Collection of timeout specifications returned by the server. - - - Timeouts are sent by the server when specific time-based events occur - during a chat session. These specifications set the inactivity timeout and - the maximum duration an EVI WebSocket connection can stay open before it - is automatically disconnected. - properties: - inactivity: - type: optional - docs: >- - Specifies the duration of user inactivity (in seconds) after which the - EVI WebSocket connection will be automatically disconnected. Default - is 600 seconds (10 minutes). - - - Accepts a minimum value of 30 seconds and a maximum value of 1,800 - seconds. - max_duration: - type: optional - docs: >- - Specifies the maximum allowed duration (in seconds) for an EVI - WebSocket connection before it is automatically disconnected. Default - is 1,800 seconds (30 minutes). - - - Accepts a minimum value of 30 seconds and a maximum value of 1,800 - seconds. - source: - openapi: evi-openapi.json - PostedUserDefinedToolSpec: - docs: A specific tool identifier to be posted to the server - properties: - id: - type: string - docs: Identifier for a Tool. Formatted as a UUID. - version: - type: optional - docs: >- - Version number for a Tool. - - - Tools, Configs, Custom Voices, and Prompts are versioned. This - versioning system supports iterative development, allowing you to - progressively refine tools and revert to previous versions if needed. - - - Version numbers are integer values representing different iterations - of the Tool. Each update to the Tool increments its version number. - source: - openapi: evi-openapi.json - PostedWebhookEventType: - enum: - - chat_started - - chat_ended - - tool_call - docs: Events this URL is subscribed to - inline: true - source: - openapi: evi-openapi.json - PostedWebhookSpec: - docs: URL and settings for a specific webhook to be posted to the server - properties: - events: - docs: >- - The list of events the specified URL is subscribed to. - - - See our [webhooks - guide](/docs/speech-to-speech-evi/configuration/build-a-configuration#supported-events) - for more information on supported events. - type: list - url: - type: string - docs: >- - The URL where event payloads will be sent. This must be a valid https - URL to ensure secure communication. The server at this URL must accept - POST requests with a JSON payload. - source: - openapi: evi-openapi.json - ReturnBuiltinToolToolType: - enum: - - BUILTIN - - FUNCTION - docs: >- - Type of Tool. Either `BUILTIN` for natively implemented tools, like web - search, or `FUNCTION` for user-defined tools. - inline: true - source: - openapi: evi-openapi.json - ReturnBuiltinTool: - docs: A specific builtin tool version returned from the server - properties: - fallback_content: - type: optional - docs: >- - Optional text passed to the supplemental LLM in place of the tool call - result. The LLM then uses this text to generate a response back to the - user, ensuring continuity in the conversation if the Tool errors. - name: - type: string - docs: Name applied to all versions of a particular Tool. - tool_type: - type: ReturnBuiltinToolToolType - docs: >- - Type of Tool. 
Either `BUILTIN` for natively implemented tools, like - web search, or `FUNCTION` for user-defined tools. - source: - openapi: evi-openapi.json - ReturnChatStatus: - enum: - - ACTIVE - - USER_ENDED - - USER_TIMEOUT - - MAX_DURATION_TIMEOUT - - INACTIVITY_TIMEOUT - - ERROR - docs: >- - Indicates the current state of the chat. There are six possible statuses: - - - - `ACTIVE`: The chat is currently active and ongoing. - - - - `USER_ENDED`: The chat was manually ended by the user. - - - - `USER_TIMEOUT`: The chat ended due to a user-defined timeout. - - - - `MAX_DURATION_TIMEOUT`: The chat ended because it reached the maximum - allowed duration. - - - - `INACTIVITY_TIMEOUT`: The chat ended due to an inactivity timeout. - - - - `ERROR`: The chat ended unexpectedly due to an error. - inline: true - source: - openapi: evi-openapi.json - ReturnChat: - docs: A description of chat and its status - properties: - chat_group_id: - type: string - docs: >- - Identifier for the Chat Group. Any chat resumed from this Chat will - have the same `chat_group_id`. Formatted as a UUID. - config: optional - end_timestamp: - type: optional - docs: >- - Time at which the Chat ended. Measured in seconds since the Unix - epoch. - event_count: - type: optional - docs: The total number of events currently in this chat. - id: - type: string - docs: Identifier for a Chat. Formatted as a UUID. - metadata: - type: optional - docs: Stringified JSON with additional metadata about the chat. - start_timestamp: - type: long - docs: >- - Time at which the Chat started. Measured in seconds since the Unix - epoch. - status: - type: ReturnChatStatus - docs: >- - Indicates the current state of the chat. There are six possible - statuses: - - - - `ACTIVE`: The chat is currently active and ongoing. - - - - `USER_ENDED`: The chat was manually ended by the user. - - - - `USER_TIMEOUT`: The chat ended due to a user-defined timeout. - - - - `MAX_DURATION_TIMEOUT`: The chat ended because it reached the - maximum allowed duration. - - - - `INACTIVITY_TIMEOUT`: The chat ended due to an inactivity timeout. - - - - `ERROR`: The chat ended unexpectedly due to an error. - source: - openapi: evi-openapi.json - ReturnChatEventRole: - enum: - - USER - - AGENT - - SYSTEM - - TOOL - docs: >- - The role of the entity which generated the Chat Event. There are four - possible values: - - - `USER`: The user, capable of sending user messages and interruptions. - - - `AGENT`: The assistant, capable of sending agent messages. - - - `SYSTEM`: The backend server, capable of transmitting errors. - - - `TOOL`: The function calling mechanism. - inline: true - source: - openapi: evi-openapi.json - ReturnChatEventType: - enum: - - FUNCTION_CALL - - FUNCTION_CALL_RESPONSE - - CHAT_END_MESSAGE - - AGENT_MESSAGE - - SYSTEM_PROMPT - - USER_RECORDING_START_MESSAGE - - RESUME_ONSET - - USER_INTERRUPTION - - CHAT_START_MESSAGE - - PAUSE_ONSET - - USER_MESSAGE - docs: >- - Type of Chat Event. There are eleven Chat Event types: - - - `SYSTEM_PROMPT`: The system prompt used to initialize the session. - - - `CHAT_START_MESSAGE`: Marks the beginning of the chat session. - - - `USER_RECORDING_START_MESSAGE`: Marks when the client began streaming - audio and the start of audio processing. - - - `USER_MESSAGE`: A message sent by the user. - - - `USER_INTERRUPTION`: A user-initiated interruption while the assistant - is speaking. - - - `AGENT_MESSAGE`: A response generated by the assistant. - - - `FUNCTION_CALL`: A record of a tool invocation by the assistant. 
- - - `FUNCTION_CALL_RESPONSE`: The result of a previously invoked function or - tool. - - - `PAUSE_ONSET`: Marks when the client sent a `pause_assistant_message` to - pause the assistant. - - - `RESUME_ONSET`: Marks when the client sent a `resume_assistant_message` - to resume the assistant. - - - `CHAT_END_MESSAGE`: Indicates the end of the chat session. - inline: true - source: - openapi: evi-openapi.json - ReturnChatEvent: - docs: A description of a single event in a chat returned from the server - properties: - chat_id: - type: string - docs: Identifier for the Chat this event occurred in. Formatted as a UUID. - emotion_features: - type: optional - docs: >- - Stringified JSON containing the prosody model inference results. - - - EVI uses the prosody model to measure 48 expressions related to speech - and vocal characteristics. These results contain a detailed emotional - and tonal analysis of the audio. Scores typically range from 0 to 1, - with higher values indicating a stronger confidence level in the - measured attribute. - id: - type: string - docs: Identifier for a Chat Event. Formatted as a UUID. - message_text: - type: optional - docs: >- - The text of the Chat Event. This field contains the message content - for each event type listed in the `type` field. - metadata: - type: optional - docs: Stringified JSON with additional metadata about the chat event. - related_event_id: - type: optional - docs: >- - Identifier for a related chat event. Currently only seen on - ASSISTANT_PROSODY events, to point back to the ASSISTANT_MESSAGE that - generated these prosody scores - role: - type: ReturnChatEventRole - docs: >- - The role of the entity which generated the Chat Event. There are four - possible values: - - - `USER`: The user, capable of sending user messages and - interruptions. - - - `AGENT`: The assistant, capable of sending agent messages. - - - `SYSTEM`: The backend server, capable of transmitting errors. - - - `TOOL`: The function calling mechanism. - timestamp: - type: long - docs: >- - Time at which the Chat Event occurred. Measured in seconds since the - Unix epoch. - type: - type: ReturnChatEventType - docs: >- - Type of Chat Event. There are eleven Chat Event types: - - - `SYSTEM_PROMPT`: The system prompt used to initialize the session. - - - `CHAT_START_MESSAGE`: Marks the beginning of the chat session. - - - `USER_RECORDING_START_MESSAGE`: Marks when the client began - streaming audio and the start of audio processing. - - - `USER_MESSAGE`: A message sent by the user. - - - `USER_INTERRUPTION`: A user-initiated interruption while the - assistant is speaking. - - - `AGENT_MESSAGE`: A response generated by the assistant. - - - `FUNCTION_CALL`: A record of a tool invocation by the assistant. - - - `FUNCTION_CALL_RESPONSE`: The result of a previously invoked - function or tool. - - - `PAUSE_ONSET`: Marks when the client sent a - `pause_assistant_message` to pause the assistant. - - - `RESUME_ONSET`: Marks when the client sent a - `resume_assistant_message` to resume the assistant. - - - `CHAT_END_MESSAGE`: Indicates the end of the chat session. - source: - openapi: evi-openapi.json - ReturnChatGroup: - docs: A description of chat_group and its status - properties: - active: - type: optional - docs: >- - Denotes whether there is an active Chat associated with this Chat - Group. - first_start_timestamp: - type: long - docs: >- - Time at which the first Chat in this Chat Group was created. Measured - in seconds since the Unix epoch. 
- id: - type: string - docs: >- - Identifier for the Chat Group. Any Chat resumed from this Chat Group - will have the same `chat_group_id`. Formatted as a UUID. - most_recent_chat_id: - type: optional - docs: >- - The `chat_id` of the most recent Chat in this Chat Group. Formatted as - a UUID. - most_recent_config: optional - most_recent_start_timestamp: - type: long - docs: >- - Time at which the most recent Chat in this Chat Group was created. - Measured in seconds since the Unix epoch. - num_chats: - type: integer - docs: The total number of Chats in this Chat Group. - source: - openapi: evi-openapi.json - ReturnChatGroupPagedAudioReconstructionsPaginationDirection: - enum: - - ASC - - DESC - docs: >- - Indicates the order in which the paginated results are presented, based on - their creation date. - - - It shows `ASC` for ascending order (chronological, with the oldest records - first) or `DESC` for descending order (reverse-chronological, with the - newest records first). This value corresponds to the `ascending_order` - query parameter used in the request. - inline: true - source: - openapi: evi-openapi.json - ReturnChatGroupPagedAudioReconstructions: - docs: A paginated list of chat reconstructions for a particular chatgroup - properties: - audio_reconstructions_page: - docs: >- - List of chat audio reconstructions returned for the specified page - number and page size. - type: list - id: - type: string - docs: Identifier for the chat group. Formatted as a UUID. - num_chats: - type: integer - docs: Total number of chats in this chatgroup - page_number: - type: integer - docs: >- - The page number of the returned list. - - - This value corresponds to the `page_number` parameter specified in the - request. Pagination uses zero-based indexing. - page_size: - type: integer - docs: >- - The maximum number of items returned per page. - - - This value corresponds to the `page_size` parameter specified in the - request. - pagination_direction: - type: ReturnChatGroupPagedAudioReconstructionsPaginationDirection - docs: >- - Indicates the order in which the paginated results are presented, - based on their creation date. - - - It shows `ASC` for ascending order (chronological, with the oldest - records first) or `DESC` for descending order (reverse-chronological, - with the newest records first). This value corresponds to the - `ascending_order` query parameter used in the request. - total_pages: - type: integer - docs: The total number of pages in the collection. - user_id: - type: string - docs: Identifier for the user that owns this chat. Formatted as a UUID. - source: - openapi: evi-openapi.json - ReturnChatGroupPagedChatsPaginationDirection: - enum: - - ASC - - DESC - docs: >- - Indicates the order in which the paginated results are presented, based on - their creation date. - - - It shows `ASC` for ascending order (chronological, with the oldest records - first) or `DESC` for descending order (reverse-chronological, with the - newest records first). This value corresponds to the `ascending_order` - query parameter used in the request. - inline: true - source: - openapi: evi-openapi.json - ReturnChatGroupPagedChats: - docs: >- - A description of chat_group and its status with a paginated list of each - chat in the chat_group - properties: - active: - type: optional - docs: >- - Denotes whether there is an active Chat associated with this Chat - Group. - chats_page: - docs: List of Chats for the specified `page_number` and `page_size`. 
- type: list - first_start_timestamp: - type: long - docs: >- - Time at which the first Chat in this Chat Group was created. Measured - in seconds since the Unix epoch. - id: - type: string - docs: >- - Identifier for the Chat Group. Any Chat resumed from this Chat Group - will have the same `chat_group_id`. Formatted as a UUID. - most_recent_start_timestamp: - type: long - docs: >- - Time at which the most recent Chat in this Chat Group was created. - Measured in seconds since the Unix epoch. - num_chats: - type: integer - docs: The total number of Chats associated with this Chat Group. - page_number: - type: integer - docs: >- - The page number of the returned list. - - - This value corresponds to the `page_number` parameter specified in the - request. Pagination uses zero-based indexing. - page_size: - type: integer - docs: >- - The maximum number of items returned per page. - - - This value corresponds to the `page_size` parameter specified in the - request. - pagination_direction: - type: ReturnChatGroupPagedChatsPaginationDirection - docs: >- - Indicates the order in which the paginated results are presented, - based on their creation date. - - - It shows `ASC` for ascending order (chronological, with the oldest - records first) or `DESC` for descending order (reverse-chronological, - with the newest records first). This value corresponds to the - `ascending_order` query parameter used in the request. - total_pages: - type: integer - docs: The total number of pages in the collection. - source: - openapi: evi-openapi.json - ReturnChatGroupPagedEventsPaginationDirection: - enum: - - ASC - - DESC - docs: >- - Indicates the order in which the paginated results are presented, based on - their creation date. - - - It shows `ASC` for ascending order (chronological, with the oldest records - first) or `DESC` for descending order (reverse-chronological, with the - newest records first). This value corresponds to the `ascending_order` - query parameter used in the request. - inline: true - source: - openapi: evi-openapi.json - ReturnChatGroupPagedEvents: - docs: >- - A paginated list of chat events that occurred across chats in this - chat_group from the server - properties: - events_page: - docs: List of Chat Events for the specified `page_number` and `page_size`. - type: list - id: - type: string - docs: >- - Identifier for the Chat Group. Any Chat resumed from this Chat Group - will have the same `chat_group_id`. Formatted as a UUID. - page_number: - type: integer - docs: >- - The page number of the returned list. - - - This value corresponds to the `page_number` parameter specified in the - request. Pagination uses zero-based indexing. - page_size: - type: integer - docs: >- - The maximum number of items returned per page. - - - This value corresponds to the `page_size` parameter specified in the - request. - pagination_direction: - type: ReturnChatGroupPagedEventsPaginationDirection - docs: >- - Indicates the order in which the paginated results are presented, - based on their creation date. - - - It shows `ASC` for ascending order (chronological, with the oldest - records first) or `DESC` for descending order (reverse-chronological, - with the newest records first). This value corresponds to the - `ascending_order` query parameter used in the request. - total_pages: - type: integer - docs: The total number of pages in the collection. 
- source: - openapi: evi-openapi.json - ReturnChatPagedEventsPaginationDirection: - enum: - - ASC - - DESC - docs: >- - Indicates the order in which the paginated results are presented, based on - their creation date. - - - It shows `ASC` for ascending order (chronological, with the oldest records - first) or `DESC` for descending order (reverse-chronological, with the - newest records first). This value corresponds to the `ascending_order` - query parameter used in the request. - inline: true - source: - openapi: evi-openapi.json - ReturnChatPagedEventsStatus: - enum: - - ACTIVE - - USER_ENDED - - USER_TIMEOUT - - MAX_DURATION_TIMEOUT - - INACTIVITY_TIMEOUT - - ERROR - docs: >- - Indicates the current state of the chat. There are six possible statuses: - - - - `ACTIVE`: The chat is currently active and ongoing. - - - - `USER_ENDED`: The chat was manually ended by the user. - - - - `USER_TIMEOUT`: The chat ended due to a user-defined timeout. - - - - `MAX_DURATION_TIMEOUT`: The chat ended because it reached the maximum - allowed duration. - - - - `INACTIVITY_TIMEOUT`: The chat ended due to an inactivity timeout. - - - - `ERROR`: The chat ended unexpectedly due to an error. - inline: true - source: - openapi: evi-openapi.json - ReturnChatPagedEvents: - docs: >- - A description of chat status with a paginated list of chat events returned - from the server - properties: - chat_group_id: - type: string - docs: >- - Identifier for the Chat Group. Any chat resumed from this Chat will - have the same `chat_group_id`. Formatted as a UUID. - config: optional - end_timestamp: - type: optional - docs: >- - Time at which the Chat ended. Measured in seconds since the Unix - epoch. - events_page: - docs: List of Chat Events for the specified `page_number` and `page_size`. - type: list - id: - type: string - docs: Identifier for a Chat. Formatted as a UUID. - metadata: - type: optional - docs: Stringified JSON with additional metadata about the chat. - page_number: - type: integer - docs: >- - The page number of the returned list. - - - This value corresponds to the `page_number` parameter specified in the - request. Pagination uses zero-based indexing. - page_size: - type: integer - docs: >- - The maximum number of items returned per page. - - - This value corresponds to the `page_size` parameter specified in the - request. - pagination_direction: - type: ReturnChatPagedEventsPaginationDirection - docs: >- - Indicates the order in which the paginated results are presented, - based on their creation date. - - - It shows `ASC` for ascending order (chronological, with the oldest - records first) or `DESC` for descending order (reverse-chronological, - with the newest records first). This value corresponds to the - `ascending_order` query parameter used in the request. - start_timestamp: - type: long - docs: >- - Time at which the Chat started. Measured in seconds since the Unix - epoch. - status: - type: ReturnChatPagedEventsStatus - docs: >- - Indicates the current state of the chat. There are six possible - statuses: - - - - `ACTIVE`: The chat is currently active and ongoing. - - - - `USER_ENDED`: The chat was manually ended by the user. - - - - `USER_TIMEOUT`: The chat ended due to a user-defined timeout. - - - - `MAX_DURATION_TIMEOUT`: The chat ended because it reached the - maximum allowed duration. - - - - `INACTIVITY_TIMEOUT`: The chat ended due to an inactivity timeout. - - - - `ERROR`: The chat ended unexpectedly due to an error. 
- total_pages: - type: integer - docs: The total number of pages in the collection. - source: - openapi: evi-openapi.json - ReturnConfig: - docs: A specific config version returned from the server - properties: - builtin_tools: - type: optional>> - docs: List of built-in tools associated with this config - created_on: - type: optional - docs: The timestamp when the first version of this config was created. - ellm_model: optional - event_messages: optional - evi_version: - type: optional - docs: The version of the EVI used with this config. - id: - type: optional - docs: Identifier for a Config. Formatted as a UUID. - language_model: optional - modified_on: - type: optional - docs: The timestamp when this version of the config was created. - name: - type: optional - docs: Name applied to all versions of a particular Config. - nudges: optional - prompt: optional - timeouts: optional - tools: - type: optional>> - docs: List of user-defined tools associated with this config. - version: - type: optional - docs: >- - Version number for a Config. Version numbers should be integers. The - combination of configId and version number is unique. - version_description: - type: optional - docs: Description that is appended to a specific version of a Config. - voice: optional - webhooks: - type: optional>> - docs: Map of webhooks associated with this config. - source: - openapi: evi-openapi.json - ReturnConfigSpec: - docs: The Config associated with this Chat. - properties: - id: - type: string - docs: Identifier for a Config. Formatted as a UUID. - version: - type: optional - docs: >- - Version number for a Config. - - - Configs, Prompts, Custom Voices, and Tools are versioned. This - versioning system supports iterative development, allowing you to - progressively refine configurations and revert to previous versions if - needed. - - - Version numbers are integer values representing different iterations - of the Config. Each update to the Config increments its version - number. - source: - openapi: evi-openapi.json - ReturnEllmModel: - docs: A specific eLLM Model configuration - properties: - allow_short_responses: - type: boolean - docs: |- - Boolean indicating if the eLLM is allowed to generate short responses. - - If omitted, short responses from the eLLM are enabled by default. - source: - openapi: evi-openapi.json - ReturnEventMessageSpec: - docs: A specific event message configuration to be returned from the server - properties: - enabled: - type: boolean - docs: >- - Boolean indicating if this event message is enabled. - - - If set to `true`, a message will be sent when the circumstances for - the specific event are met. - text: - type: optional - docs: >- - Text to use as the event message when the corresponding event occurs. - If no text is specified, EVI will generate an appropriate message - based on its current context and the system prompt. - source: - openapi: evi-openapi.json - ReturnEventMessageSpecs: - docs: >- - Collection of event messages returned by the server. - - - Event messages are sent by the server when specific events occur during a - chat session. These messages are used to configure behaviors for EVI, such - as controlling how EVI starts a new conversation. - properties: - on_inactivity_timeout: - type: optional - docs: >- - Specifies the message EVI provides when the chat is about to be - disconnected due to a user inactivity timeout, such as a message - mentioning a lack of user input for a period of time. 
- - - Enabling an inactivity message allows developers to use this message - event for "checking in" with the user if they are not responding to - see if they are still active. - - - If the user does not respond in the number of seconds specified in the - `inactivity_timeout` field, then EVI will say the message and the user - has 15 seconds to respond. If they respond in time, the conversation - will continue; if not, the conversation will end. - - - However, if the inactivity message is not enabled, then reaching the - inactivity timeout will immediately end the connection. - on_max_duration_timeout: - type: optional - docs: >- - Specifies the message EVI provides when the chat is disconnected due - to reaching the maximum chat duration, such as a message mentioning - the time limit for the chat has been reached. - on_new_chat: - type: optional - docs: >- - Specifies the initial message EVI provides when a new chat is started, - such as a greeting or welcome message. - source: - openapi: evi-openapi.json - ReturnLanguageModel: - docs: A specific LanguageModel - properties: - model_provider: - type: optional - docs: The provider of the supplemental language model. - model_resource: - type: optional - docs: String that specifies the language model to use with `model_provider`. - temperature: - type: optional - docs: >- - The model temperature, with values between 0 to 1 (inclusive). - - - Controls the randomness of the LLM's output, with values closer to 0 - yielding focused, deterministic responses and values closer to 1 - producing more creative, diverse responses. - source: - openapi: evi-openapi.json - ReturnNudgeSpec: - docs: A specific nudge configuration returned from the server - properties: - enabled: - type: boolean - docs: EVI will nudge user after inactivity - interval_secs: - type: optional - docs: Time interval in seconds after which the nudge will be sent. - source: - openapi: evi-openapi.json - ReturnPagedChatGroupsPaginationDirection: - enum: - - ASC - - DESC - docs: >- - Indicates the order in which the paginated results are presented, based on - their creation date. - - - It shows `ASC` for ascending order (chronological, with the oldest records - first) or `DESC` for descending order (reverse-chronological, with the - newest records first). This value corresponds to the `ascending_order` - query parameter used in the request. - inline: true - source: - openapi: evi-openapi.json - ReturnPagedChatGroups: - docs: A paginated list of chat_groups returned from the server - properties: - chat_groups_page: - docs: >- - List of Chat Groups and their metadata returned for the specified - `page_number` and `page_size`. - type: list - page_number: - type: integer - docs: >- - The page number of the returned list. - - - This value corresponds to the `page_number` parameter specified in the - request. Pagination uses zero-based indexing. - page_size: - type: integer - docs: >- - The maximum number of items returned per page. - - - This value corresponds to the `page_size` parameter specified in the - request. - pagination_direction: - type: ReturnPagedChatGroupsPaginationDirection - docs: >- - Indicates the order in which the paginated results are presented, - based on their creation date. - - - It shows `ASC` for ascending order (chronological, with the oldest - records first) or `DESC` for descending order (reverse-chronological, - with the newest records first). This value corresponds to the - `ascending_order` query parameter used in the request. 
- total_pages: - type: integer - docs: The total number of pages in the collection. - source: - openapi: evi-openapi.json - ReturnPagedChatsPaginationDirection: - enum: - - ASC - - DESC - docs: >- - Indicates the order in which the paginated results are presented, based on - their creation date. - - - It shows `ASC` for ascending order (chronological, with the oldest records - first) or `DESC` for descending order (reverse-chronological, with the - newest records first). This value corresponds to the `ascending_order` - query parameter used in the request. - inline: true - source: - openapi: evi-openapi.json - ReturnPagedChats: - docs: A paginated list of chats returned from the server - properties: - chats_page: - docs: >- - List of Chats and their metadata returned for the specified - `page_number` and `page_size`. - type: list - page_number: - type: integer - docs: >- - The page number of the returned list. - - - This value corresponds to the `page_number` parameter specified in the - request. Pagination uses zero-based indexing. - page_size: - type: integer - docs: >- - The maximum number of items returned per page. - - - This value corresponds to the `page_size` parameter specified in the - request. - pagination_direction: - type: ReturnPagedChatsPaginationDirection - docs: >- - Indicates the order in which the paginated results are presented, - based on their creation date. - - - It shows `ASC` for ascending order (chronological, with the oldest - records first) or `DESC` for descending order (reverse-chronological, - with the newest records first). This value corresponds to the - `ascending_order` query parameter used in the request. - total_pages: - type: integer - docs: The total number of pages in the collection. - source: - openapi: evi-openapi.json - ReturnPagedConfigs: - docs: A paginated list of config versions returned from the server - properties: - configs_page: - type: optional> - docs: >- - List of configs returned for the specified `page_number` and - `page_size`. - page_number: - type: optional - docs: >- - The page number of the returned list. - - - This value corresponds to the `page_number` parameter specified in the - request. Pagination uses zero-based indexing. - page_size: - type: optional - docs: >- - The maximum number of items returned per page. - - - This value corresponds to the `page_size` parameter specified in the - request. - total_pages: - type: integer - docs: The total number of pages in the collection. - source: - openapi: evi-openapi.json - ReturnPagedPrompts: - docs: A paginated list of prompt versions returned from the server - properties: - page_number: - type: integer - docs: >- - The page number of the returned list. - - - This value corresponds to the `page_number` parameter specified in the - request. Pagination uses zero-based indexing. - page_size: - type: integer - docs: >- - The maximum number of items returned per page. - - - This value corresponds to the `page_size` parameter specified in the - request. - prompts_page: - docs: >- - List of prompts returned for the specified `page_number` and - `page_size`. - type: list> - total_pages: - type: integer - docs: The total number of pages in the collection. - source: - openapi: evi-openapi.json - ReturnPagedUserDefinedTools: - docs: A paginated list of user defined tool versions returned from the server - properties: - page_number: - type: integer - docs: >- - The page number of the returned list. - - - This value corresponds to the `page_number` parameter specified in the - request. 
Pagination uses zero-based indexing. - page_size: - type: integer - docs: >- - The maximum number of items returned per page. - - - This value corresponds to the `page_size` parameter specified in the - request. - tools_page: - docs: >- - List of tools returned for the specified `page_number` and - `page_size`. - type: list> - total_pages: - type: integer - docs: The total number of pages in the collection. - source: - openapi: evi-openapi.json - ReturnPromptVersionType: - enum: - - FIXED - - LATEST - docs: >- - Versioning method for a Prompt. Either `FIXED` for using a fixed version - number or `LATEST` for auto-updating to the latest version. - inline: true - source: - openapi: evi-openapi.json - ReturnPrompt: - docs: A specific prompt version returned from the server - properties: - created_on: - type: long - docs: The timestamp when the first version of this prompt was created. - id: - type: string - docs: Identifier for a Prompt. Formatted as a UUID. - modified_on: - type: long - docs: The timestamp when this version of the prompt was created. - name: - type: string - docs: Name applied to all versions of a particular Prompt. - text: - type: string - docs: Text used for this version of the Prompt. - version: - type: integer - docs: >- - Version number for a Prompt. Version numbers should be integers. The - combination of configId and version number is unique. - version_description: - type: optional - docs: Description that is appended to a specific version of a Prompt. - version_type: - type: string - docs: >- - Indicates whether this prompt is using a fixed version number or - auto-updating to the latest version. Values from the VersionType enum. - source: - openapi: evi-openapi.json - ReturnTimeoutSpec: - docs: A specific timeout configuration to be returned from the server - properties: - duration_secs: - type: optional - docs: >- - Duration in seconds for the timeout (e.g. 600 seconds represents 10 - minutes). - enabled: - type: boolean - docs: >- - Boolean indicating if this timeout is enabled. - - - If set to false, EVI will not timeout due to a specified duration - being reached. However, the conversation will eventually disconnect - after 1,800 seconds (30 minutes), which is the maximum WebSocket - duration limit for EVI. - source: - openapi: evi-openapi.json - ReturnTimeoutSpecs: - docs: >- - Collection of timeout specifications returned by the server. - - - Timeouts are sent by the server when specific time-based events occur - during a chat session. These specifications set the inactivity timeout and - the maximum duration an EVI WebSocket connection can stay open before it - is automatically disconnected. - properties: - inactivity: - type: ReturnTimeoutSpec - docs: >- - Specifies the duration of user inactivity (in seconds) after which the - EVI WebSocket connection will be automatically disconnected. Default - is 600 seconds (10 minutes). - - - Accepts a minimum value of 30 seconds and a maximum value of 1,800 - seconds. - max_duration: - type: ReturnTimeoutSpec - docs: >- - Specifies the maximum allowed duration (in seconds) for an EVI - WebSocket connection before it is automatically disconnected. Default - is 1,800 seconds (30 minutes). - - - Accepts a minimum value of 30 seconds and a maximum value of 1,800 - seconds. - source: - openapi: evi-openapi.json - ReturnUserDefinedToolToolType: - enum: - - BUILTIN - - FUNCTION - docs: >- - Type of Tool. Either `BUILTIN` for natively implemented tools, like web - search, or `FUNCTION` for user-defined tools. 
- inline: true - source: - openapi: evi-openapi.json - ReturnUserDefinedToolVersionType: - enum: - - FIXED - - LATEST - docs: >- - Versioning method for a Tool. Either `FIXED` for using a fixed version - number or `LATEST` for auto-updating to the latest version. - inline: true - source: - openapi: evi-openapi.json - ReturnUserDefinedTool: - docs: A specific tool version returned from the server - properties: - created_on: - type: long - docs: >- - Time at which the Tool was created. Measured in seconds since the Unix - epoch. - description: - type: optional - docs: >- - An optional description of what the Tool does, used by the - supplemental LLM to choose when and how to call the function. - fallback_content: - type: optional - docs: >- - Optional text passed to the supplemental LLM in place of the tool call - result. The LLM then uses this text to generate a response back to the - user, ensuring continuity in the conversation if the Tool errors. - id: - type: string - docs: Identifier for a Tool. Formatted as a UUID. - modified_on: - type: long - docs: >- - Time at which the Tool was last modified. Measured in seconds since - the Unix epoch. - name: - type: string - docs: Name applied to all versions of a particular Tool. - parameters: - type: string - docs: >- - Stringified JSON defining the parameters used by this version of the - Tool. - - - These parameters define the inputs needed for the Tool's execution, - including the expected data type and description for each input field. - Structured as a stringified JSON schema, this format ensures the tool - receives data in the expected format. - tool_type: - type: ReturnUserDefinedToolToolType - docs: >- - Type of Tool. Either `BUILTIN` for natively implemented tools, like - web search, or `FUNCTION` for user-defined tools. - version: - type: integer - docs: >- - Version number for a Tool. - - - Tools, Configs, Custom Voices, and Prompts are versioned. This - versioning system supports iterative development, allowing you to - progressively refine tools and revert to previous versions if needed. - - - Version numbers are integer values representing different iterations - of the Tool. Each update to the Tool increments its version number. - version_description: - type: optional - docs: An optional description of the Tool version. - version_type: - type: ReturnUserDefinedToolVersionType - docs: >- - Versioning method for a Tool. Either `FIXED` for using a fixed version - number or `LATEST` for auto-updating to the latest version. - source: - openapi: evi-openapi.json - ReturnWebhookEventType: - enum: - - chat_started - - chat_ended - - tool_call - docs: Events this URL is subscribed to - inline: true - source: - openapi: evi-openapi.json - ReturnWebhookSpec: - docs: Collection of webhook URL endpoints to be returned from the server - properties: - events: - docs: >- - The list of events the specified URL is subscribed to. - - - See our [webhooks - guide](/docs/speech-to-speech-evi/configuration/build-a-configuration#supported-events) - for more information on supported events. - type: list - url: - type: string - docs: >- - The URL where event payloads will be sent. This must be a valid https - URL to ensure secure communication. The server at this URL must accept - POST requests with a JSON payload. - source: - openapi: evi-openapi.json - VoiceId: - properties: - id: - type: string - docs: ID of the voice in the `Voice Library`. - provider: - type: optional - docs: Model provider associated with this Voice ID. 
- source: - openapi: evi-openapi.json - VoiceName: - properties: - name: - type: string - docs: Name of the voice in the `Voice Library`. - provider: - type: optional - docs: Model provider associated with this Voice Name. - source: - openapi: evi-openapi.json - VoiceRef: - discriminated: false - union: - - type: VoiceId - - type: VoiceName - source: - openapi: evi-openapi.json - ReturnVoice: - docs: An Octave voice available for text-to-speech - properties: - compatible_octave_models: optional> - id: optional - name: optional - provider: optional - source: - openapi: evi-openapi.json - VoiceProvider: - enum: - - HUME_AI - - CUSTOM_VOICE - source: - openapi: evi-openapi.json diff --git a/.mock/definition/empathic-voice/chat.yml b/.mock/definition/empathic-voice/chat.yml deleted file mode 100644 index c69e2ab8..00000000 --- a/.mock/definition/empathic-voice/chat.yml +++ /dev/null @@ -1,149 +0,0 @@ -imports: - root: __package__.yml -channel: - path: /chat - url: evi - auth: false - docs: Chat with Empathic Voice Interface (EVI) - query-parameters: - access_token: - type: optional - default: '' - docs: >- - Access token used for authenticating the client. If not provided, an - `api_key` must be provided to authenticate. - - - The access token is generated using both an API key and a Secret key, - which provides an additional layer of security compared to using just an - API key. - - - For more details, refer to the [Authentication Strategies - Guide](/docs/introduction/api-key#authentication-strategies). - allow_connection: - type: optional - default: false - docs: Allows external connections to this chat via the /connect endpoint. - config_id: - type: optional - docs: >- - The unique identifier for an EVI configuration. - - - Include this ID in your connection request to equip EVI with the Prompt, - Language Model, Voice, and Tools associated with the specified - configuration. If omitted, EVI will apply [default configuration - settings](/docs/speech-to-speech-evi/configuration/build-a-configuration#default-configuration). - - - For help obtaining this ID, see our [Configuration - Guide](/docs/speech-to-speech-evi/configuration). - config_version: - type: optional - docs: >- - The version number of the EVI configuration specified by the - `config_id`. - - - Configs, as well as Prompts and Tools, are versioned. This versioning - system supports iterative development, allowing you to progressively - refine configurations and revert to previous versions if needed. - - - Include this parameter to apply a specific version of an EVI - configuration. If omitted, the latest version will be applied. - event_limit: - type: optional - docs: >- - The maximum number of chat events to return from chat history. By - default, the system returns up to 300 events (100 events per page × 3 - pages). Set this parameter to a smaller value to limit the number of - events returned. - resumed_chat_group_id: - type: optional - docs: >- - The unique identifier for a Chat Group. Use this field to preserve - context from a previous Chat session. - - - A Chat represents a single session from opening to closing a WebSocket - connection. In contrast, a Chat Group is a series of resumed Chats that - collectively represent a single conversation spanning multiple sessions. - Each Chat includes a Chat Group ID, which is used to preserve the - context of previous Chat sessions when starting a new one. 
- - - Including the Chat Group ID in the `resumed_chat_group_id` query - parameter is useful for seamlessly resuming a Chat after unexpected - network disconnections and for picking up conversations exactly where - you left off at a later time. This ensures preserved context across - multiple sessions. - - - There are three ways to obtain the Chat Group ID: - - - - [Chat - Metadata](/reference/speech-to-speech-evi/chat#receive.ChatMetadata): - Upon establishing a WebSocket connection with EVI, the user receives a - Chat Metadata message. This message contains a `chat_group_id`, which - can be used to resume conversations within this chat group in future - sessions. - - - - [List Chats - endpoint](/reference/speech-to-speech-evi/chats/list-chats): Use the GET - `/v0/evi/chats` endpoint to obtain the Chat Group ID of individual Chat - sessions. This endpoint lists all available Chat sessions and their - associated Chat Group ID. - - - - [List Chat Groups - endpoint](/reference/speech-to-speech-evi/chat-groups/list-chat-groups): - Use the GET `/v0/evi/chat_groups` endpoint to obtain the Chat Group IDs - of all Chat Groups associated with an API key. This endpoint returns a - list of all available chat groups. - verbose_transcription: - type: optional - default: false - docs: >- - A flag to enable verbose transcription. Set this query parameter to - `true` to have unfinalized user transcripts be sent to the client as - interim UserMessage messages. The - [interim](/reference/speech-to-speech-evi/chat#receive.UserMessage.interim) - field on a - [UserMessage](/reference/speech-to-speech-evi/chat#receive.UserMessage) - denotes whether the message is "interim" or "final." - api_key: - type: optional - default: '' - docs: >- - API key used for authenticating the client. If not provided, an - `access_token` must be provided to authenticate. - - - For more details, refer to the [Authentication Strategies - Guide](/docs/introduction/api-key#authentication-strategies). - session_settings: root.ConnectSessionSettings - messages: - publish: - origin: client - body: PublishEvent - subscribe: - origin: server - body: root.SubscribeEvent -types: - PublishEvent: - discriminated: false - union: - - type: root.AudioInput - - type: root.SessionSettings - - type: root.UserInput - - type: root.AssistantInput - - type: root.ToolResponseMessage - - type: root.ToolErrorMessage - - type: root.PauseAssistantMessage - - type: root.ResumeAssistantMessage - source: - openapi: evi-asyncapi.json diff --git a/.mock/definition/empathic-voice/chatGroups.yml b/.mock/definition/empathic-voice/chatGroups.yml deleted file mode 100644 index b60ec5bb..00000000 --- a/.mock/definition/empathic-voice/chatGroups.yml +++ /dev/null @@ -1,623 +0,0 @@ -imports: - root: __package__.yml -service: - auth: false - base-path: '' - endpoints: - list-chat-groups: - path: /v0/evi/chat_groups - method: GET - docs: Fetches a paginated list of **Chat Groups**. - pagination: - offset: $request.page_number - results: $response.chat_groups_page - source: - openapi: evi-openapi.json - display-name: List chat_groups - request: - name: ChatGroupsListChatGroupsRequest - query-parameters: - page_number: - type: optional - default: 0 - docs: >- - Specifies the page number to retrieve, enabling pagination. - - - This parameter uses zero-based indexing. For example, setting - `page_number` to 0 retrieves the first page of results (items 0-9 - if `page_size` is 10), setting `page_number` to 1 retrieves the - second page (items 10-19), and so on. 
Defaults to 0, which - retrieves the first page. - page_size: - type: optional - docs: >- - Specifies the maximum number of results to include per page, - enabling pagination. The value must be between 1 and 100, - inclusive. - - - For example, if `page_size` is set to 10, each page will include - up to 10 items. Defaults to 10. - ascending_order: - type: optional - docs: >- - Specifies the sorting order of the results based on their creation - date. Set to true for ascending order (chronological, with the - oldest records first) and false for descending order - (reverse-chronological, with the newest records first). Defaults - to true. - config_id: - type: optional - docs: >- - The unique identifier for an EVI configuration. - - - Filter Chat Groups to only include Chats that used this - `config_id` in their most recent Chat. - validation: - format: uuid - response: - docs: Success - type: root.ReturnPagedChatGroups - status-code: 200 - errors: - - root.BadRequestError - examples: - - query-parameters: - page_number: 0 - page_size: 1 - ascending_order: true - config_id: 1b60e1a0-cc59-424a-8d2c-189d354db3f3 - response: - body: - page_number: 0 - page_size: 1 - total_pages: 1 - pagination_direction: ASC - chat_groups_page: - - id: 697056f0-6c7e-487d-9bd8-9c19df79f05f - first_start_timestamp: 1721844196397 - most_recent_start_timestamp: 1721861821717 - active: false - most_recent_chat_id: dfdbdd4d-0ddf-418b-8fc4-80a266579d36 - num_chats: 5 - get-chat-group: - path: /v0/evi/chat_groups/{id} - method: GET - docs: >- - Fetches a **ChatGroup** by ID, including a paginated list of **Chats** - associated with the **ChatGroup**. - source: - openapi: evi-openapi.json - path-parameters: - id: - type: string - docs: Identifier for a Chat Group. Formatted as a UUID. - display-name: Get chat_group - request: - name: ChatGroupsGetChatGroupRequest - query-parameters: - page_size: - type: optional - docs: >- - Specifies the maximum number of results to include per page, - enabling pagination. The value must be between 1 and 100, - inclusive. - - - For example, if `page_size` is set to 10, each page will include - up to 10 items. Defaults to 10. - page_number: - type: optional - default: 0 - docs: >- - Specifies the page number to retrieve, enabling pagination. - - - This parameter uses zero-based indexing. For example, setting - `page_number` to 0 retrieves the first page of results (items 0-9 - if `page_size` is 10), setting `page_number` to 1 retrieves the - second page (items 10-19), and so on. Defaults to 0, which - retrieves the first page. - ascending_order: - type: optional - docs: >- - Specifies the sorting order of the results based on their creation - date. Set to true for ascending order (chronological, with the - oldest records first) and false for descending order - (reverse-chronological, with the newest records first). Defaults - to true. 
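As a rough illustration of how a client could call this Chat Group endpoint outside the generated SDK, here is a minimal Python sketch using the `requests` library with the query parameters defined above. The base URL, the `X-Hume-Api-Key` header, and the `HUME_API_KEY` environment variable are assumptions made for the example rather than part of this definition file; the Chat Group ID is taken from the sample response above.

    # Minimal sketch (assumed base URL and auth header); fetches one Chat Group
    # and lists the Chats on the first page of results.
    import os
    import requests

    HUME_API_KEY = os.environ["HUME_API_KEY"]  # assumed environment variable name
    chat_group_id = "697056f0-6c7e-487d-9bd8-9c19df79f05f"  # example ID from the sample above

    response = requests.get(
        f"https://api.hume.ai/v0/evi/chat_groups/{chat_group_id}",
        headers={"X-Hume-Api-Key": HUME_API_KEY},
        # Booleans are sent as lowercase strings so they serialize as plain query text.
        params={"page_number": 0, "page_size": 10, "ascending_order": "true"},
    )
    response.raise_for_status()
    group = response.json()
    for chat in group["chats_page"]:
        print(chat["id"], chat["status"])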
- response: - docs: Success - type: root.ReturnChatGroupPagedChats - status-code: 200 - errors: - - root.BadRequestError - examples: - - path-parameters: - id: 697056f0-6c7e-487d-9bd8-9c19df79f05f - query-parameters: - page_number: 0 - page_size: 1 - ascending_order: true - response: - body: - id: 369846cf-6ad5-404d-905e-a8acb5cdfc78 - first_start_timestamp: 1712334213647 - most_recent_start_timestamp: 1712334213647 - num_chats: 1 - page_number: 0 - page_size: 1 - total_pages: 1 - pagination_direction: ASC - chats_page: - - id: 6375d4f8-cd3e-4d6b-b13b-ace66b7c8aaa - chat_group_id: 369846cf-6ad5-404d-905e-a8acb5cdfc78 - status: USER_ENDED - start_timestamp: 1712334213647 - end_timestamp: 1712334332571 - event_count: 0 - metadata: null - config: null - active: false - get-audio: - path: /v0/evi/chat_groups/{id}/audio - method: GET - docs: >- - Fetches a paginated list of audio for each **Chat** within the specified - **Chat Group**. For more details, see our guide on audio reconstruction - [here](/docs/speech-to-speech-evi/faq#can-i-access-the-audio-of-previous-conversations-with-evi). - source: - openapi: evi-openapi.json - path-parameters: - id: - type: string - docs: Identifier for a Chat Group. Formatted as a UUID. - display-name: Get chat group audio - request: - name: ChatGroupsGetAudioRequest - query-parameters: - page_number: - type: optional - default: 0 - docs: >- - Specifies the page number to retrieve, enabling pagination. - - - This parameter uses zero-based indexing. For example, setting - `page_number` to 0 retrieves the first page of results (items 0-9 - if `page_size` is 10), setting `page_number` to 1 retrieves the - second page (items 10-19), and so on. Defaults to 0, which - retrieves the first page. - page_size: - type: optional - docs: >- - Specifies the maximum number of results to include per page, - enabling pagination. The value must be between 1 and 100, - inclusive. - - - For example, if `page_size` is set to 10, each page will include - up to 10 items. Defaults to 10. - ascending_order: - type: optional - docs: >- - Specifies the sorting order of the results based on their creation - date. Set to true for ascending order (chronological, with the - oldest records first) and false for descending order - (reverse-chronological, with the newest records first). Defaults - to true. - response: - docs: Success - type: root.ReturnChatGroupPagedAudioReconstructions - status-code: 200 - errors: - - root.BadRequestError - examples: - - path-parameters: - id: 369846cf-6ad5-404d-905e-a8acb5cdfc78 - query-parameters: - page_number: 0 - page_size: 10 - ascending_order: true - response: - body: - id: 369846cf-6ad5-404d-905e-a8acb5cdfc78 - user_id: e6235940-cfda-3988-9147-ff531627cf42 - num_chats: 1 - page_number: 0 - page_size: 10 - total_pages: 1 - pagination_direction: ASC - audio_reconstructions_page: - - id: 470a49f6-1dec-4afe-8b61-035d3b2d63b0 - user_id: e6235940-cfda-3988-9147-ff531627cf42 - status: COMPLETE - filename: >- - e6235940-cfda-3988-9147-ff531627cf42/470a49f6-1dec-4afe-8b61-035d3b2d63b0/reconstructed_audio.mp4 - modified_at: 1729875432555 - signed_audio_url: https://storage.googleapis.com/...etc. - signed_url_expiration_timestamp_millis: 1730232816964 - list-chat-group-events: - path: /v0/evi/chat_groups/{id}/events - method: GET - docs: >- - Fetches a paginated list of **Chat** events associated with a **Chat - Group**. 
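For illustration only, a minimal Python sketch of walking every page of Chat Events for a Chat Group via this endpoint, using the zero-indexed `page_number` together with the `total_pages` and `events_page` fields described earlier. The base URL, auth header, and environment variable are assumed for the example and are not part of this definition file.

    # Minimal sketch (assumed base URL and auth header); iterates all pages of
    # Chat Events for a Chat Group in ascending (chronological) order.
    import os
    import requests

    HUME_API_KEY = os.environ["HUME_API_KEY"]  # assumed environment variable name
    chat_group_id = "697056f0-6c7e-487d-9bd8-9c19df79f05f"  # example ID from the samples above

    page_number = 0
    while True:
        response = requests.get(
            f"https://api.hume.ai/v0/evi/chat_groups/{chat_group_id}/events",
            headers={"X-Hume-Api-Key": HUME_API_KEY},
            params={"page_number": page_number, "page_size": 100, "ascending_order": "true"},
        )
        response.raise_for_status()
        page = response.json()
        for event in page["events_page"]:
            print(event["timestamp"], event["role"], event["type"], event["message_text"])
        page_number += 1
        if page_number >= page["total_pages"]:
            break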
- pagination: - offset: $request.page_number - results: $response.events_page - source: - openapi: evi-openapi.json - path-parameters: - id: - type: string - docs: Identifier for a Chat Group. Formatted as a UUID. - display-name: List chat events from a specific chat_group - request: - name: ChatGroupsListChatGroupEventsRequest - query-parameters: - page_size: - type: optional - docs: >- - Specifies the maximum number of results to include per page, - enabling pagination. The value must be between 1 and 100, - inclusive. - - - For example, if `page_size` is set to 10, each page will include - up to 10 items. Defaults to 10. - page_number: - type: optional - default: 0 - docs: >- - Specifies the page number to retrieve, enabling pagination. - - - This parameter uses zero-based indexing. For example, setting - `page_number` to 0 retrieves the first page of results (items 0-9 - if `page_size` is 10), setting `page_number` to 1 retrieves the - second page (items 10-19), and so on. Defaults to 0, which - retrieves the first page. - ascending_order: - type: optional - docs: >- - Specifies the sorting order of the results based on their creation - date. Set to true for ascending order (chronological, with the - oldest records first) and false for descending order - (reverse-chronological, with the newest records first). Defaults - to true. - response: - docs: Success - type: root.ReturnChatGroupPagedEvents - status-code: 200 - errors: - - root.BadRequestError - examples: - - path-parameters: - id: 697056f0-6c7e-487d-9bd8-9c19df79f05f - query-parameters: - page_number: 0 - page_size: 3 - ascending_order: true - response: - body: - id: 697056f0-6c7e-487d-9bd8-9c19df79f05f - page_number: 0 - page_size: 3 - total_pages: 1 - pagination_direction: ASC - events_page: - - id: 5d44bdbb-49a3-40fb-871d-32bf7e76efe7 - chat_id: 470a49f6-1dec-4afe-8b61-035d3b2d63b0 - timestamp: 1716244940762 - role: SYSTEM - type: SYSTEM_PROMPT - message_text: >- - You are an AI weather assistant providing users with - accurate and up-to-date weather information. Respond to user - queries concisely and clearly. Use simple language and avoid - technical jargon. Provide temperature, precipitation, wind - conditions, and any weather alerts. Include helpful tips if - severe weather is expected. - emotion_features: '' - metadata: '' - - id: 5976ddf6-d093-4bb9-ba60-8f6c25832dde - chat_id: 470a49f6-1dec-4afe-8b61-035d3b2d63b0 - timestamp: 1716244956278 - role: USER - type: USER_MESSAGE - message_text: Hello. 
- emotion_features: >- - {"Admiration": 0.09906005859375, "Adoration": - 0.12213134765625, "Aesthetic Appreciation": - 0.05035400390625, "Amusement": 0.16552734375, "Anger": - 0.0037384033203125, "Anxiety": 0.010101318359375, "Awe": - 0.058197021484375, "Awkwardness": 0.10552978515625, - "Boredom": 0.1141357421875, "Calmness": 0.115234375, - "Concentration": 0.00444793701171875, "Confusion": - 0.0343017578125, "Contemplation": 0.00812530517578125, - "Contempt": 0.009002685546875, "Contentment": - 0.087158203125, "Craving": 0.00818634033203125, "Desire": - 0.018310546875, "Determination": 0.003238677978515625, - "Disappointment": 0.024169921875, "Disgust": - 0.00702667236328125, "Distress": 0.00936126708984375, - "Doubt": 0.00632476806640625, "Ecstasy": 0.0293731689453125, - "Embarrassment": 0.01800537109375, "Empathic Pain": - 0.0088348388671875, "Entrancement": 0.013397216796875, - "Envy": 0.02557373046875, "Excitement": 0.12109375, "Fear": - 0.004413604736328125, "Guilt": 0.016571044921875, "Horror": - 0.00274658203125, "Interest": 0.2142333984375, "Joy": - 0.29638671875, "Love": 0.16015625, "Nostalgia": - 0.007843017578125, "Pain": 0.007160186767578125, "Pride": - 0.00508880615234375, "Realization": 0.054229736328125, - "Relief": 0.048736572265625, "Romance": 0.026397705078125, - "Sadness": 0.0265350341796875, "Satisfaction": - 0.051361083984375, "Shame": 0.00974273681640625, "Surprise - (negative)": 0.0218963623046875, "Surprise (positive)": - 0.216064453125, "Sympathy": 0.021728515625, "Tiredness": - 0.0173797607421875, "Triumph": 0.004520416259765625} - metadata: >- - {"segments": [{"content": "Hello.", "embedding": - [0.6181640625, 0.1763916015625, -30.921875, 1.2705078125, - 0.927734375, 0.63720703125, 2.865234375, 0.1080322265625, - 0.2978515625, 1.0107421875, 1.34375, 0.74560546875, - 0.416259765625, 0.99462890625, -0.333740234375, - 0.361083984375, -1.388671875, 1.0107421875, 1.3173828125, - 0.55615234375, 0.541015625, -0.1837158203125, 1.697265625, - 0.228515625, 2.087890625, -0.311767578125, - 0.053680419921875, 1.3349609375, 0.95068359375, - 0.00441741943359375, 0.705078125, 1.8916015625, - -0.939453125, 0.93701171875, -0.28955078125, 1.513671875, - 0.5595703125, 1.0126953125, -0.1624755859375, 1.4072265625, - -0.28857421875, -0.4560546875, -0.1500244140625, - -0.1102294921875, -0.222412109375, 0.8779296875, - 1.275390625, 1.6689453125, 0.80712890625, -0.34814453125, - -0.325439453125, 0.412841796875, 0.81689453125, - 0.55126953125, 1.671875, 0.6611328125, 0.7451171875, - 1.50390625, 1.0224609375, -1.671875, 0.7373046875, - 2.1328125, 2.166015625, 0.41015625, -0.127685546875, - 1.9345703125, -4.2734375, 0.332275390625, 0.26171875, - 0.76708984375, 0.2685546875, 0.468017578125, 1.208984375, - -1.517578125, 1.083984375, 0.84814453125, 1.0244140625, - -0.0072174072265625, 1.34375, 1.0712890625, 1.517578125, - -0.52001953125, 0.59228515625, 0.8154296875, -0.951171875, - -0.07757568359375, 1.3330078125, 1.125, 0.61181640625, - 1.494140625, 0.357421875, 1.1796875, 1.482421875, 0.8046875, - 0.1536865234375, 1.8076171875, 0.68115234375, -15.171875, - 1.2294921875, 0.319091796875, 0.499755859375, 1.5771484375, - 0.94677734375, -0.2490234375, 0.88525390625, 3.47265625, - 0.75927734375, 0.71044921875, 1.2333984375, 1.4169921875, - -0.56640625, -1.8095703125, 1.37109375, 0.428955078125, - 1.89453125, -0.39013671875, 0.1734619140625, 1.5595703125, - -1.2294921875, 2.552734375, 0.58349609375, 0.2156982421875, - -0.00984954833984375, -0.6865234375, -0.0272979736328125, - -0.2264404296875, 
2.853515625, 1.3896484375, 0.52978515625, - 0.783203125, 3.0390625, 0.75537109375, 0.219970703125, - 0.384521484375, 0.385986328125, 2.0546875, - -0.10443115234375, 1.5146484375, 1.4296875, 1.9716796875, - 1.1318359375, 0.31591796875, 0.338623046875, 1.654296875, - -0.88037109375, -0.21484375, 1.45703125, 1.0380859375, - -0.52294921875, -0.47802734375, 0.1650390625, 1.2392578125, - -1.138671875, 0.56787109375, 1.318359375, 0.4287109375, - 0.1981201171875, 2.4375, 0.281005859375, 0.89404296875, - -0.1552734375, 0.6474609375, -0.08331298828125, - 0.00740814208984375, -0.045501708984375, -0.578125, - 2.02734375, 0.59228515625, 0.35693359375, 1.2919921875, - 1.22265625, 1.0537109375, 0.145263671875, 1.05859375, - -0.369140625, 0.207275390625, 0.78857421875, 0.599609375, - 0.99072265625, 0.24462890625, 1.26953125, 0.08404541015625, - 1.349609375, 0.73291015625, 1.3212890625, 0.388916015625, - 1.0869140625, 0.9931640625, -1.5673828125, 0.0462646484375, - 0.650390625, 0.253662109375, 0.58251953125, 1.8134765625, - 0.8642578125, 2.591796875, 0.7314453125, 0.85986328125, - 0.5615234375, 0.9296875, 0.04144287109375, 1.66015625, - 1.99609375, 1.171875, 1.181640625, 1.5126953125, - 0.0224456787109375, 0.58349609375, -1.4931640625, - 0.81884765625, 0.732421875, -0.6455078125, -0.62451171875, - 1.7802734375, 0.01526641845703125, -0.423095703125, - 0.461669921875, 4.87890625, 1.2392578125, -0.6953125, - 0.6689453125, 0.62451171875, -1.521484375, 1.7685546875, - 0.810546875, 0.65478515625, 0.26123046875, 1.6396484375, - 0.87548828125, 1.7353515625, 2.046875, 1.5634765625, - 0.69384765625, 1.375, 0.8916015625, 1.0107421875, - 0.1304931640625, 2.009765625, 0.06402587890625, - -0.08428955078125, 0.04351806640625, -1.7529296875, - 2.02734375, 3.521484375, 0.404541015625, 1.6337890625, - -0.276611328125, 0.8837890625, -0.1287841796875, - 0.91064453125, 0.8193359375, 0.701171875, 0.036529541015625, - 1.26171875, 1.0478515625, -0.1422119140625, 1.0634765625, - 0.61083984375, 1.3505859375, 1.208984375, 0.57275390625, - 1.3623046875, 2.267578125, 0.484375, 0.9150390625, - 0.56787109375, -0.70068359375, 0.27587890625, - -0.70654296875, 0.8466796875, 0.57568359375, 1.6162109375, - 0.87939453125, 2.248046875, -0.5458984375, 1.7744140625, - 1.328125, 1.232421875, 0.6806640625, 0.9365234375, - 1.052734375, -1.08984375, 1.8330078125, -0.4033203125, - 1.0673828125, 0.297607421875, 1.5703125, 1.67578125, - 1.34765625, 2.8203125, 2.025390625, -0.48583984375, - 0.7626953125, 0.01007843017578125, 1.435546875, - 0.007205963134765625, 0.05157470703125, -0.9853515625, - 0.26708984375, 1.16796875, 1.2041015625, 1.99609375, - -0.07916259765625, 1.244140625, -0.32080078125, - 0.6748046875, 0.419921875, 1.3212890625, 1.291015625, - 0.599609375, 0.0550537109375, 0.9599609375, 0.93505859375, - 0.111083984375, 1.302734375, 0.0833740234375, 2.244140625, - 1.25390625, 1.6015625, 0.58349609375, 1.7568359375, - -0.263427734375, -0.019866943359375, -0.24658203125, - -0.1871337890625, 0.927734375, 0.62255859375, - 0.275146484375, 0.79541015625, 1.1796875, 1.1767578125, - -0.26123046875, -0.268310546875, 1.8994140625, 1.318359375, - 2.1875, 0.2469482421875, 1.41015625, 0.03973388671875, - 1.2685546875, 1.1025390625, 0.9560546875, 0.865234375, - -1.92578125, 1.154296875, 0.389892578125, 1.130859375, - 0.95947265625, 0.72314453125, 2.244140625, - 0.048553466796875, 0.626953125, 0.42919921875, - 0.82275390625, 0.311767578125, -0.320556640625, - 0.01041412353515625, 0.1483154296875, 0.10809326171875, - -0.3173828125, 1.1337890625, -0.8642578125, 
1.4033203125, - 0.048828125, 1.1787109375, 0.98779296875, 1.818359375, - 1.1552734375, 0.6015625, 1.2392578125, -1.2685546875, - 0.39208984375, 0.83251953125, 0.224365234375, - 0.0019989013671875, 0.87548828125, 1.6572265625, - 1.107421875, 0.434814453125, 1.8251953125, 0.442626953125, - 1.2587890625, 0.09320068359375, -0.896484375, 1.8017578125, - 1.451171875, -0.0755615234375, 0.6083984375, 2.06640625, - 0.673828125, -0.33740234375, 0.192138671875, 0.21435546875, - 0.80224609375, -1.490234375, 0.9501953125, 0.86083984375, - -0.40283203125, 4.109375, 2.533203125, 1.2529296875, - 0.8271484375, 0.225830078125, 1.0478515625, -1.9755859375, - 0.841796875, 0.392822265625, 0.525390625, 0.33935546875, - -0.79443359375, 0.71630859375, 0.97998046875, - -0.175537109375, 0.97705078125, 1.705078125, 0.29638671875, - 0.68359375, 0.54150390625, 0.435791015625, 0.99755859375, - -0.369140625, 1.009765625, -0.140380859375, 0.426513671875, - 0.189697265625, 1.8193359375, 1.1201171875, -0.5009765625, - -0.331298828125, 0.759765625, -0.09442138671875, 0.74609375, - -1.947265625, 1.3544921875, -3.935546875, 2.544921875, - 1.359375, 0.1363525390625, 0.79296875, 0.79931640625, - -0.3466796875, 1.1396484375, -0.33447265625, 2.0078125, - -0.241455078125, 0.6318359375, 0.365234375, 0.296142578125, - 0.830078125, 1.0458984375, 0.5830078125, 0.61572265625, - 14.0703125, -2.0078125, -0.381591796875, 1.228515625, - 0.08282470703125, -0.67822265625, -0.04339599609375, - 0.397216796875, 0.1656494140625, 0.137451171875, - 0.244873046875, 1.1611328125, -1.3818359375, 0.8447265625, - 1.171875, 0.36328125, 0.252685546875, 0.1197509765625, - 0.232177734375, -0.020172119140625, 0.64404296875, - -0.01100921630859375, -1.9267578125, 0.222412109375, - 0.56005859375, 1.3046875, 1.1630859375, 1.197265625, - 1.02734375, 1.6806640625, -0.043731689453125, 1.4697265625, - 0.81201171875, 1.5390625, 1.240234375, -0.7353515625, - 1.828125, 1.115234375, 1.931640625, -0.517578125, - 0.77880859375, 1.0546875, 0.95361328125, 3.42578125, - 0.0160369873046875, 0.875, 0.56005859375, 1.2421875, - 1.986328125, 1.4814453125, 0.0948486328125, 1.115234375, - 0.00665283203125, 2.09375, 0.3544921875, -0.52783203125, - 1.2099609375, 0.45068359375, 0.65625, 0.1112060546875, - 1.0751953125, -0.9521484375, -0.30029296875, 1.4462890625, - 2.046875, 3.212890625, 1.68359375, 1.07421875, - -0.5263671875, 0.74560546875, 1.37890625, 0.15283203125, - 0.2440185546875, 0.62646484375, -0.1280517578125, - 0.7646484375, -0.515625, -0.35693359375, 1.2958984375, - 0.96923828125, 0.58935546875, 1.3701171875, 1.0673828125, - 0.2337646484375, 0.93115234375, 0.66357421875, 6.0, - 1.1025390625, -0.51708984375, -0.38330078125, 0.7197265625, - 0.246826171875, -0.45166015625, 1.9521484375, 0.5546875, - 0.08807373046875, 0.18505859375, 0.8857421875, - -0.57177734375, 0.251708984375, 0.234375, 2.57421875, - 0.9599609375, 0.5029296875, 0.10382080078125, - 0.08331298828125, 0.66748046875, -0.349609375, 1.287109375, - 0.259765625, 2.015625, 2.828125, -0.3095703125, - -0.164306640625, -0.3408203125, 0.486572265625, - 0.8466796875, 1.9130859375, 0.09088134765625, 0.66552734375, - 0.00972747802734375, -0.83154296875, 1.755859375, - 0.654296875, 0.173828125, 0.27587890625, -0.47607421875, - -0.264404296875, 0.7529296875, 0.6533203125, 0.7275390625, - 0.499755859375, 0.833984375, -0.44775390625, -0.05078125, - -0.454833984375, 0.75439453125, 0.68505859375, - 0.210693359375, -0.283935546875, -0.53564453125, - 0.96826171875, 0.861328125, -3.33984375, -0.26171875, - 0.77734375, 
0.26513671875, -0.14111328125, -0.042236328125, - -0.84814453125, 0.2137451171875, 0.94921875, 0.65185546875, - -0.5380859375, 0.1529541015625, -0.360595703125, - -0.0333251953125, -0.69189453125, 0.8974609375, 0.7109375, - 0.81494140625, -0.259521484375, 1.1904296875, 0.62158203125, - 1.345703125, 0.89404296875, 0.70556640625, 1.0673828125, - 1.392578125, 0.5068359375, 0.962890625, 0.736328125, - 1.55078125, 0.50390625, -0.398681640625, 2.361328125, - 0.345947265625, -0.61962890625, 0.330078125, 0.75439453125, - -0.673828125, -0.2379150390625, 1.5673828125, 1.369140625, - 0.1119384765625, -0.1834716796875, 1.4599609375, - -0.77587890625, 0.5556640625, 0.09954833984375, - 0.0285186767578125, 0.58935546875, -0.501953125, - 0.212890625, 0.02679443359375, 0.1715087890625, - 0.03466796875, -0.564453125, 2.029296875, 2.45703125, - -0.72216796875, 2.138671875, 0.50830078125, - -0.09356689453125, 0.230224609375, 1.6943359375, - 1.5126953125, 0.39453125, 0.411376953125, 1.07421875, - -0.8046875, 0.51416015625, 0.2271728515625, -0.283447265625, - 0.38427734375, 0.73388671875, 0.6962890625, 1.4990234375, - 0.02813720703125, 0.40478515625, 1.2451171875, 1.1162109375, - -5.5703125, 0.76171875, 0.322021484375, 1.0361328125, - 1.197265625, 0.1163330078125, 0.2425537109375, 1.5595703125, - 1.5791015625, -0.0921630859375, 0.484619140625, - 1.9052734375, 5.31640625, 1.6337890625, 0.95947265625, - -0.1751708984375, 0.466552734375, 0.8330078125, 1.03125, - 0.2044677734375, 0.31298828125, -1.1220703125, 0.5517578125, - 0.93505859375, 0.45166015625, 1.951171875, 0.65478515625, - 1.30859375, 1.0859375, 0.56494140625, 2.322265625, - 0.242919921875, 1.81640625, -0.469970703125, -0.841796875, - 0.90869140625, 1.5361328125, 0.923828125, 1.0595703125, - 0.356689453125, -0.46142578125, 2.134765625, 1.3037109375, - -0.32373046875, -9.2265625, 0.4521484375, 0.88037109375, - -0.53955078125, 0.96484375, 0.7705078125, 0.84521484375, - 1.580078125, -0.1448974609375, 0.7607421875, 1.0166015625, - -0.086669921875, 1.611328125, 0.05938720703125, 0.5078125, - 0.8427734375, 2.431640625, 0.66357421875, 3.203125, - 0.132080078125, 0.461181640625, 0.779296875, 1.9482421875, - 1.8720703125, 0.845703125, -1.3837890625, -0.138916015625, - 0.35546875, 0.2457275390625, 0.75341796875, 1.828125, - 1.4169921875, 0.60791015625, 1.0068359375, 1.109375, - 0.484130859375, -0.302001953125, 0.4951171875, 0.802734375, - 1.9482421875, 0.916015625, 0.1646728515625, 2.599609375, - 1.7177734375, -0.2374267578125, 0.98046875, 0.39306640625, - -1.1396484375, 1.6533203125, 0.375244140625], "scores": - [0.09906005859375, 0.12213134765625, 0.05035400390625, - 0.16552734375, 0.0037384033203125, 0.010101318359375, - 0.058197021484375, 0.10552978515625, 0.1141357421875, - 0.115234375, 0.00444793701171875, 0.00812530517578125, - 0.0343017578125, 0.009002685546875, 0.087158203125, - 0.00818634033203125, 0.003238677978515625, 0.024169921875, - 0.00702667236328125, 0.00936126708984375, - 0.00632476806640625, 0.0293731689453125, 0.01800537109375, - 0.0088348388671875, 0.013397216796875, 0.02557373046875, - 0.12109375, 0.004413604736328125, 0.016571044921875, - 0.00274658203125, 0.2142333984375, 0.29638671875, - 0.16015625, 0.007843017578125, 0.007160186767578125, - 0.00508880615234375, 0.054229736328125, 0.048736572265625, - 0.026397705078125, 0.0265350341796875, 0.051361083984375, - 0.018310546875, 0.00974273681640625, 0.0218963623046875, - 0.216064453125, 0.021728515625, 0.0173797607421875, - 0.004520416259765625], "stoks": [52, 52, 52, 52, 52, 41, 41, - 374, 
303, 303, 303, 427], "time": {"begin_ms": 640, - "end_ms": 1140}}]} - - id: 7645a0d1-2e64-410d-83a8-b96040432e9a - chat_id: 470a49f6-1dec-4afe-8b61-035d3b2d63b0 - timestamp: 1716244957031 - role: AGENT - type: AGENT_MESSAGE - message_text: Hello! - emotion_features: >- - {"Admiration": 0.044921875, "Adoration": 0.0253753662109375, - "Aesthetic Appreciation": 0.03265380859375, "Amusement": - 0.118408203125, "Anger": 0.06719970703125, "Anxiety": - 0.0411376953125, "Awe": 0.03802490234375, "Awkwardness": - 0.056549072265625, "Boredom": 0.04217529296875, "Calmness": - 0.08709716796875, "Concentration": 0.070556640625, - "Confusion": 0.06964111328125, "Contemplation": - 0.0343017578125, "Contempt": 0.037689208984375, - "Contentment": 0.059417724609375, "Craving": - 0.01132965087890625, "Desire": 0.01406097412109375, - "Determination": 0.1143798828125, "Disappointment": - 0.051177978515625, "Disgust": 0.028594970703125, "Distress": - 0.054901123046875, "Doubt": 0.04638671875, "Ecstasy": - 0.0258026123046875, "Embarrassment": 0.0222015380859375, - "Empathic Pain": 0.015777587890625, "Entrancement": - 0.0160980224609375, "Envy": 0.0163421630859375, - "Excitement": 0.129638671875, "Fear": 0.03125, "Guilt": - 0.01483917236328125, "Horror": 0.0194549560546875, - "Interest": 0.1341552734375, "Joy": 0.0738525390625, "Love": - 0.0216522216796875, "Nostalgia": 0.0210418701171875, "Pain": - 0.020721435546875, "Pride": 0.05499267578125, "Realization": - 0.0728759765625, "Relief": 0.04052734375, "Romance": - 0.0129241943359375, "Sadness": 0.0254669189453125, - "Satisfaction": 0.07159423828125, "Shame": 0.01495361328125, - "Surprise (negative)": 0.05560302734375, "Surprise - (positive)": 0.07965087890625, "Sympathy": - 0.022247314453125, "Tiredness": 0.0194549560546875, - "Triumph": 0.04107666015625} - metadata: '' - source: - openapi: evi-openapi.json diff --git a/.mock/definition/empathic-voice/chatWebhooks.yml b/.mock/definition/empathic-voice/chatWebhooks.yml deleted file mode 100644 index 7f3da91b..00000000 --- a/.mock/definition/empathic-voice/chatWebhooks.yml +++ /dev/null @@ -1,58 +0,0 @@ -imports: - root: __package__.yml -webhooks: - chatEnded: - audiences: [] - method: POST - display-name: Chat Ended - headers: {} - payload: root.WebhookEventChatEnded - examples: - - payload: - chat_group_id: chat_group_id - chat_id: chat_id - config_id: null - caller_number: null - custom_session_id: null - duration_seconds: 1 - end_reason: ACTIVE - end_time: 1 - docs: Sent when an EVI chat ends. - chatStarted: - audiences: [] - method: POST - display-name: Chat Started - headers: {} - payload: root.WebhookEventChatStarted - examples: - - payload: - chat_group_id: chat_group_id - chat_id: chat_id - config_id: null - caller_number: null - chat_start_type: new_chat_group - custom_session_id: null - start_time: 1 - docs: Sent when an EVI chat is started. 
- toolCall: - audiences: [] - method: POST - display-name: Tool Call - headers: {} - payload: root.WebhookEventToolCall - examples: - - payload: - chat_group_id: chat_group_id - chat_id: chat_id - config_id: null - caller_number: null - custom_session_id: null - timestamp: 1 - tool_call_message: - custom_session_id: null - name: name - parameters: parameters - response_required: true - tool_call_id: tool_call_id - tool_type: builtin - docs: Sent when EVI triggers a tool call diff --git a/.mock/definition/empathic-voice/chats.yml b/.mock/definition/empathic-voice/chats.yml deleted file mode 100644 index 7ceb5503..00000000 --- a/.mock/definition/empathic-voice/chats.yml +++ /dev/null @@ -1,503 +0,0 @@ -imports: - root: __package__.yml -service: - auth: false - base-path: '' - endpoints: - list-chats: - path: /v0/evi/chats - method: GET - docs: Fetches a paginated list of **Chats**. - pagination: - offset: $request.page_number - results: $response.chats_page - source: - openapi: evi-openapi.json - display-name: List chats - request: - name: ChatsListChatsRequest - query-parameters: - page_number: - type: optional - default: 0 - docs: >- - Specifies the page number to retrieve, enabling pagination. - - - This parameter uses zero-based indexing. For example, setting - `page_number` to 0 retrieves the first page of results (items 0-9 - if `page_size` is 10), setting `page_number` to 1 retrieves the - second page (items 10-19), and so on. Defaults to 0, which - retrieves the first page. - page_size: - type: optional - docs: >- - Specifies the maximum number of results to include per page, - enabling pagination. The value must be between 1 and 100, - inclusive. - - - For example, if `page_size` is set to 10, each page will include - up to 10 items. Defaults to 10. - ascending_order: - type: optional - docs: >- - Specifies the sorting order of the results based on their creation - date. Set to true for ascending order (chronological, with the - oldest records first) and false for descending order - (reverse-chronological, with the newest records first). Defaults - to true. - config_id: - type: optional - docs: Filter to only include chats that used this config. - validation: - format: uuid - response: - docs: Success - type: root.ReturnPagedChats - status-code: 200 - errors: - - root.BadRequestError - examples: - - query-parameters: - page_number: 0 - page_size: 1 - ascending_order: true - response: - body: - page_number: 0 - page_size: 1 - total_pages: 1 - pagination_direction: ASC - chats_page: - - id: 470a49f6-1dec-4afe-8b61-035d3b2d63b0 - chat_group_id: 9fc18597-3567-42d5-94d6-935bde84bf2f - status: USER_ENDED - start_timestamp: 1716244940648 - end_timestamp: 1716244958546 - event_count: 3 - metadata: '' - config: - id: 1b60e1a0-cc59-424a-8d2c-189d354db3f3 - version: 0 - list-chat-events: - path: /v0/evi/chats/{id} - method: GET - docs: Fetches a paginated list of **Chat** events. - pagination: - offset: $request.page_number - results: $response.events_page - source: - openapi: evi-openapi.json - path-parameters: - id: - type: string - docs: Identifier for a Chat. Formatted as a UUID. - display-name: List chat events - request: - name: ChatsListChatEventsRequest - query-parameters: - page_size: - type: optional - docs: >- - Specifies the maximum number of results to include per page, - enabling pagination. The value must be between 1 and 100, - inclusive. - - - For example, if `page_size` is set to 10, each page will include - up to 10 items. Defaults to 10. 
- page_number: - type: optional - default: 0 - docs: >- - Specifies the page number to retrieve, enabling pagination. - - - This parameter uses zero-based indexing. For example, setting - `page_number` to 0 retrieves the first page of results (items 0-9 - if `page_size` is 10), setting `page_number` to 1 retrieves the - second page (items 10-19), and so on. Defaults to 0, which - retrieves the first page. - ascending_order: - type: optional - docs: >- - Specifies the sorting order of the results based on their creation - date. Set to true for ascending order (chronological, with the - oldest records first) and false for descending order - (reverse-chronological, with the newest records first). Defaults - to true. - response: - docs: Success - type: root.ReturnChatPagedEvents - status-code: 200 - errors: - - root.BadRequestError - examples: - - path-parameters: - id: 470a49f6-1dec-4afe-8b61-035d3b2d63b0 - query-parameters: - page_number: 0 - page_size: 3 - ascending_order: true - response: - body: - id: 470a49f6-1dec-4afe-8b61-035d3b2d63b0 - chat_group_id: 9fc18597-3567-42d5-94d6-935bde84bf2f - status: USER_ENDED - start_timestamp: 1716244940648 - pagination_direction: ASC - events_page: - - id: 5d44bdbb-49a3-40fb-871d-32bf7e76efe7 - chat_id: 470a49f6-1dec-4afe-8b61-035d3b2d63b0 - timestamp: 1716244940762 - role: SYSTEM - type: SYSTEM_PROMPT - message_text: >- - You are an AI weather assistant providing users with - accurate and up-to-date weather information. Respond to user - queries concisely and clearly. Use simple language and avoid - technical jargon. Provide temperature, precipitation, wind - conditions, and any weather alerts. Include helpful tips if - severe weather is expected. - emotion_features: '' - metadata: '' - - id: 5976ddf6-d093-4bb9-ba60-8f6c25832dde - chat_id: 470a49f6-1dec-4afe-8b61-035d3b2d63b0 - timestamp: 1716244956278 - role: USER - type: USER_MESSAGE - message_text: Hello. 
- emotion_features: >- - {"Admiration": 0.09906005859375, "Adoration": - 0.12213134765625, "Aesthetic Appreciation": - 0.05035400390625, "Amusement": 0.16552734375, "Anger": - 0.0037384033203125, "Anxiety": 0.010101318359375, "Awe": - 0.058197021484375, "Awkwardness": 0.10552978515625, - "Boredom": 0.1141357421875, "Calmness": 0.115234375, - "Concentration": 0.00444793701171875, "Confusion": - 0.0343017578125, "Contemplation": 0.00812530517578125, - "Contempt": 0.009002685546875, "Contentment": - 0.087158203125, "Craving": 0.00818634033203125, "Desire": - 0.018310546875, "Determination": 0.003238677978515625, - "Disappointment": 0.024169921875, "Disgust": - 0.00702667236328125, "Distress": 0.00936126708984375, - "Doubt": 0.00632476806640625, "Ecstasy": 0.0293731689453125, - "Embarrassment": 0.01800537109375, "Empathic Pain": - 0.0088348388671875, "Entrancement": 0.013397216796875, - "Envy": 0.02557373046875, "Excitement": 0.12109375, "Fear": - 0.004413604736328125, "Guilt": 0.016571044921875, "Horror": - 0.00274658203125, "Interest": 0.2142333984375, "Joy": - 0.29638671875, "Love": 0.16015625, "Nostalgia": - 0.007843017578125, "Pain": 0.007160186767578125, "Pride": - 0.00508880615234375, "Realization": 0.054229736328125, - "Relief": 0.048736572265625, "Romance": 0.026397705078125, - "Sadness": 0.0265350341796875, "Satisfaction": - 0.051361083984375, "Shame": 0.00974273681640625, "Surprise - (negative)": 0.0218963623046875, "Surprise (positive)": - 0.216064453125, "Sympathy": 0.021728515625, "Tiredness": - 0.0173797607421875, "Triumph": 0.004520416259765625} - metadata: >- - {"segments": [{"content": "Hello.", "embedding": - [0.6181640625, 0.1763916015625, -30.921875, 1.2705078125, - 0.927734375, 0.63720703125, 2.865234375, 0.1080322265625, - 0.2978515625, 1.0107421875, 1.34375, 0.74560546875, - 0.416259765625, 0.99462890625, -0.333740234375, - 0.361083984375, -1.388671875, 1.0107421875, 1.3173828125, - 0.55615234375, 0.541015625, -0.1837158203125, 1.697265625, - 0.228515625, 2.087890625, -0.311767578125, - 0.053680419921875, 1.3349609375, 0.95068359375, - 0.00441741943359375, 0.705078125, 1.8916015625, - -0.939453125, 0.93701171875, -0.28955078125, 1.513671875, - 0.5595703125, 1.0126953125, -0.1624755859375, 1.4072265625, - -0.28857421875, -0.4560546875, -0.1500244140625, - -0.1102294921875, -0.222412109375, 0.8779296875, - 1.275390625, 1.6689453125, 0.80712890625, -0.34814453125, - -0.325439453125, 0.412841796875, 0.81689453125, - 0.55126953125, 1.671875, 0.6611328125, 0.7451171875, - 1.50390625, 1.0224609375, -1.671875, 0.7373046875, - 2.1328125, 2.166015625, 0.41015625, -0.127685546875, - 1.9345703125, -4.2734375, 0.332275390625, 0.26171875, - 0.76708984375, 0.2685546875, 0.468017578125, 1.208984375, - -1.517578125, 1.083984375, 0.84814453125, 1.0244140625, - -0.0072174072265625, 1.34375, 1.0712890625, 1.517578125, - -0.52001953125, 0.59228515625, 0.8154296875, -0.951171875, - -0.07757568359375, 1.3330078125, 1.125, 0.61181640625, - 1.494140625, 0.357421875, 1.1796875, 1.482421875, 0.8046875, - 0.1536865234375, 1.8076171875, 0.68115234375, -15.171875, - 1.2294921875, 0.319091796875, 0.499755859375, 1.5771484375, - 0.94677734375, -0.2490234375, 0.88525390625, 3.47265625, - 0.75927734375, 0.71044921875, 1.2333984375, 1.4169921875, - -0.56640625, -1.8095703125, 1.37109375, 0.428955078125, - 1.89453125, -0.39013671875, 0.1734619140625, 1.5595703125, - -1.2294921875, 2.552734375, 0.58349609375, 0.2156982421875, - -0.00984954833984375, -0.6865234375, -0.0272979736328125, - -0.2264404296875, 
2.853515625, 1.3896484375, 0.52978515625, - 0.783203125, 3.0390625, 0.75537109375, 0.219970703125, - 0.384521484375, 0.385986328125, 2.0546875, - -0.10443115234375, 1.5146484375, 1.4296875, 1.9716796875, - 1.1318359375, 0.31591796875, 0.338623046875, 1.654296875, - -0.88037109375, -0.21484375, 1.45703125, 1.0380859375, - -0.52294921875, -0.47802734375, 0.1650390625, 1.2392578125, - -1.138671875, 0.56787109375, 1.318359375, 0.4287109375, - 0.1981201171875, 2.4375, 0.281005859375, 0.89404296875, - -0.1552734375, 0.6474609375, -0.08331298828125, - 0.00740814208984375, -0.045501708984375, -0.578125, - 2.02734375, 0.59228515625, 0.35693359375, 1.2919921875, - 1.22265625, 1.0537109375, 0.145263671875, 1.05859375, - -0.369140625, 0.207275390625, 0.78857421875, 0.599609375, - 0.99072265625, 0.24462890625, 1.26953125, 0.08404541015625, - 1.349609375, 0.73291015625, 1.3212890625, 0.388916015625, - 1.0869140625, 0.9931640625, -1.5673828125, 0.0462646484375, - 0.650390625, 0.253662109375, 0.58251953125, 1.8134765625, - 0.8642578125, 2.591796875, 0.7314453125, 0.85986328125, - 0.5615234375, 0.9296875, 0.04144287109375, 1.66015625, - 1.99609375, 1.171875, 1.181640625, 1.5126953125, - 0.0224456787109375, 0.58349609375, -1.4931640625, - 0.81884765625, 0.732421875, -0.6455078125, -0.62451171875, - 1.7802734375, 0.01526641845703125, -0.423095703125, - 0.461669921875, 4.87890625, 1.2392578125, -0.6953125, - 0.6689453125, 0.62451171875, -1.521484375, 1.7685546875, - 0.810546875, 0.65478515625, 0.26123046875, 1.6396484375, - 0.87548828125, 1.7353515625, 2.046875, 1.5634765625, - 0.69384765625, 1.375, 0.8916015625, 1.0107421875, - 0.1304931640625, 2.009765625, 0.06402587890625, - -0.08428955078125, 0.04351806640625, -1.7529296875, - 2.02734375, 3.521484375, 0.404541015625, 1.6337890625, - -0.276611328125, 0.8837890625, -0.1287841796875, - 0.91064453125, 0.8193359375, 0.701171875, 0.036529541015625, - 1.26171875, 1.0478515625, -0.1422119140625, 1.0634765625, - 0.61083984375, 1.3505859375, 1.208984375, 0.57275390625, - 1.3623046875, 2.267578125, 0.484375, 0.9150390625, - 0.56787109375, -0.70068359375, 0.27587890625, - -0.70654296875, 0.8466796875, 0.57568359375, 1.6162109375, - 0.87939453125, 2.248046875, -0.5458984375, 1.7744140625, - 1.328125, 1.232421875, 0.6806640625, 0.9365234375, - 1.052734375, -1.08984375, 1.8330078125, -0.4033203125, - 1.0673828125, 0.297607421875, 1.5703125, 1.67578125, - 1.34765625, 2.8203125, 2.025390625, -0.48583984375, - 0.7626953125, 0.01007843017578125, 1.435546875, - 0.007205963134765625, 0.05157470703125, -0.9853515625, - 0.26708984375, 1.16796875, 1.2041015625, 1.99609375, - -0.07916259765625, 1.244140625, -0.32080078125, - 0.6748046875, 0.419921875, 1.3212890625, 1.291015625, - 0.599609375, 0.0550537109375, 0.9599609375, 0.93505859375, - 0.111083984375, 1.302734375, 0.0833740234375, 2.244140625, - 1.25390625, 1.6015625, 0.58349609375, 1.7568359375, - -0.263427734375, -0.019866943359375, -0.24658203125, - -0.1871337890625, 0.927734375, 0.62255859375, - 0.275146484375, 0.79541015625, 1.1796875, 1.1767578125, - -0.26123046875, -0.268310546875, 1.8994140625, 1.318359375, - 2.1875, 0.2469482421875, 1.41015625, 0.03973388671875, - 1.2685546875, 1.1025390625, 0.9560546875, 0.865234375, - -1.92578125, 1.154296875, 0.389892578125, 1.130859375, - 0.95947265625, 0.72314453125, 2.244140625, - 0.048553466796875, 0.626953125, 0.42919921875, - 0.82275390625, 0.311767578125, -0.320556640625, - 0.01041412353515625, 0.1483154296875, 0.10809326171875, - -0.3173828125, 1.1337890625, -0.8642578125, 
1.4033203125, - 0.048828125, 1.1787109375, 0.98779296875, 1.818359375, - 1.1552734375, 0.6015625, 1.2392578125, -1.2685546875, - 0.39208984375, 0.83251953125, 0.224365234375, - 0.0019989013671875, 0.87548828125, 1.6572265625, - 1.107421875, 0.434814453125, 1.8251953125, 0.442626953125, - 1.2587890625, 0.09320068359375, -0.896484375, 1.8017578125, - 1.451171875, -0.0755615234375, 0.6083984375, 2.06640625, - 0.673828125, -0.33740234375, 0.192138671875, 0.21435546875, - 0.80224609375, -1.490234375, 0.9501953125, 0.86083984375, - -0.40283203125, 4.109375, 2.533203125, 1.2529296875, - 0.8271484375, 0.225830078125, 1.0478515625, -1.9755859375, - 0.841796875, 0.392822265625, 0.525390625, 0.33935546875, - -0.79443359375, 0.71630859375, 0.97998046875, - -0.175537109375, 0.97705078125, 1.705078125, 0.29638671875, - 0.68359375, 0.54150390625, 0.435791015625, 0.99755859375, - -0.369140625, 1.009765625, -0.140380859375, 0.426513671875, - 0.189697265625, 1.8193359375, 1.1201171875, -0.5009765625, - -0.331298828125, 0.759765625, -0.09442138671875, 0.74609375, - -1.947265625, 1.3544921875, -3.935546875, 2.544921875, - 1.359375, 0.1363525390625, 0.79296875, 0.79931640625, - -0.3466796875, 1.1396484375, -0.33447265625, 2.0078125, - -0.241455078125, 0.6318359375, 0.365234375, 0.296142578125, - 0.830078125, 1.0458984375, 0.5830078125, 0.61572265625, - 14.0703125, -2.0078125, -0.381591796875, 1.228515625, - 0.08282470703125, -0.67822265625, -0.04339599609375, - 0.397216796875, 0.1656494140625, 0.137451171875, - 0.244873046875, 1.1611328125, -1.3818359375, 0.8447265625, - 1.171875, 0.36328125, 0.252685546875, 0.1197509765625, - 0.232177734375, -0.020172119140625, 0.64404296875, - -0.01100921630859375, -1.9267578125, 0.222412109375, - 0.56005859375, 1.3046875, 1.1630859375, 1.197265625, - 1.02734375, 1.6806640625, -0.043731689453125, 1.4697265625, - 0.81201171875, 1.5390625, 1.240234375, -0.7353515625, - 1.828125, 1.115234375, 1.931640625, -0.517578125, - 0.77880859375, 1.0546875, 0.95361328125, 3.42578125, - 0.0160369873046875, 0.875, 0.56005859375, 1.2421875, - 1.986328125, 1.4814453125, 0.0948486328125, 1.115234375, - 0.00665283203125, 2.09375, 0.3544921875, -0.52783203125, - 1.2099609375, 0.45068359375, 0.65625, 0.1112060546875, - 1.0751953125, -0.9521484375, -0.30029296875, 1.4462890625, - 2.046875, 3.212890625, 1.68359375, 1.07421875, - -0.5263671875, 0.74560546875, 1.37890625, 0.15283203125, - 0.2440185546875, 0.62646484375, -0.1280517578125, - 0.7646484375, -0.515625, -0.35693359375, 1.2958984375, - 0.96923828125, 0.58935546875, 1.3701171875, 1.0673828125, - 0.2337646484375, 0.93115234375, 0.66357421875, 6.0, - 1.1025390625, -0.51708984375, -0.38330078125, 0.7197265625, - 0.246826171875, -0.45166015625, 1.9521484375, 0.5546875, - 0.08807373046875, 0.18505859375, 0.8857421875, - -0.57177734375, 0.251708984375, 0.234375, 2.57421875, - 0.9599609375, 0.5029296875, 0.10382080078125, - 0.08331298828125, 0.66748046875, -0.349609375, 1.287109375, - 0.259765625, 2.015625, 2.828125, -0.3095703125, - -0.164306640625, -0.3408203125, 0.486572265625, - 0.8466796875, 1.9130859375, 0.09088134765625, 0.66552734375, - 0.00972747802734375, -0.83154296875, 1.755859375, - 0.654296875, 0.173828125, 0.27587890625, -0.47607421875, - -0.264404296875, 0.7529296875, 0.6533203125, 0.7275390625, - 0.499755859375, 0.833984375, -0.44775390625, -0.05078125, - -0.454833984375, 0.75439453125, 0.68505859375, - 0.210693359375, -0.283935546875, -0.53564453125, - 0.96826171875, 0.861328125, -3.33984375, -0.26171875, - 0.77734375, 
0.26513671875, -0.14111328125, -0.042236328125, - -0.84814453125, 0.2137451171875, 0.94921875, 0.65185546875, - -0.5380859375, 0.1529541015625, -0.360595703125, - -0.0333251953125, -0.69189453125, 0.8974609375, 0.7109375, - 0.81494140625, -0.259521484375, 1.1904296875, 0.62158203125, - 1.345703125, 0.89404296875, 0.70556640625, 1.0673828125, - 1.392578125, 0.5068359375, 0.962890625, 0.736328125, - 1.55078125, 0.50390625, -0.398681640625, 2.361328125, - 0.345947265625, -0.61962890625, 0.330078125, 0.75439453125, - -0.673828125, -0.2379150390625, 1.5673828125, 1.369140625, - 0.1119384765625, -0.1834716796875, 1.4599609375, - -0.77587890625, 0.5556640625, 0.09954833984375, - 0.0285186767578125, 0.58935546875, -0.501953125, - 0.212890625, 0.02679443359375, 0.1715087890625, - 0.03466796875, -0.564453125, 2.029296875, 2.45703125, - -0.72216796875, 2.138671875, 0.50830078125, - -0.09356689453125, 0.230224609375, 1.6943359375, - 1.5126953125, 0.39453125, 0.411376953125, 1.07421875, - -0.8046875, 0.51416015625, 0.2271728515625, -0.283447265625, - 0.38427734375, 0.73388671875, 0.6962890625, 1.4990234375, - 0.02813720703125, 0.40478515625, 1.2451171875, 1.1162109375, - -5.5703125, 0.76171875, 0.322021484375, 1.0361328125, - 1.197265625, 0.1163330078125, 0.2425537109375, 1.5595703125, - 1.5791015625, -0.0921630859375, 0.484619140625, - 1.9052734375, 5.31640625, 1.6337890625, 0.95947265625, - -0.1751708984375, 0.466552734375, 0.8330078125, 1.03125, - 0.2044677734375, 0.31298828125, -1.1220703125, 0.5517578125, - 0.93505859375, 0.45166015625, 1.951171875, 0.65478515625, - 1.30859375, 1.0859375, 0.56494140625, 2.322265625, - 0.242919921875, 1.81640625, -0.469970703125, -0.841796875, - 0.90869140625, 1.5361328125, 0.923828125, 1.0595703125, - 0.356689453125, -0.46142578125, 2.134765625, 1.3037109375, - -0.32373046875, -9.2265625, 0.4521484375, 0.88037109375, - -0.53955078125, 0.96484375, 0.7705078125, 0.84521484375, - 1.580078125, -0.1448974609375, 0.7607421875, 1.0166015625, - -0.086669921875, 1.611328125, 0.05938720703125, 0.5078125, - 0.8427734375, 2.431640625, 0.66357421875, 3.203125, - 0.132080078125, 0.461181640625, 0.779296875, 1.9482421875, - 1.8720703125, 0.845703125, -1.3837890625, -0.138916015625, - 0.35546875, 0.2457275390625, 0.75341796875, 1.828125, - 1.4169921875, 0.60791015625, 1.0068359375, 1.109375, - 0.484130859375, -0.302001953125, 0.4951171875, 0.802734375, - 1.9482421875, 0.916015625, 0.1646728515625, 2.599609375, - 1.7177734375, -0.2374267578125, 0.98046875, 0.39306640625, - -1.1396484375, 1.6533203125, 0.375244140625], "scores": - [0.09906005859375, 0.12213134765625, 0.05035400390625, - 0.16552734375, 0.0037384033203125, 0.010101318359375, - 0.058197021484375, 0.10552978515625, 0.1141357421875, - 0.115234375, 0.00444793701171875, 0.00812530517578125, - 0.0343017578125, 0.009002685546875, 0.087158203125, - 0.00818634033203125, 0.003238677978515625, 0.024169921875, - 0.00702667236328125, 0.00936126708984375, - 0.00632476806640625, 0.0293731689453125, 0.01800537109375, - 0.0088348388671875, 0.013397216796875, 0.02557373046875, - 0.12109375, 0.004413604736328125, 0.016571044921875, - 0.00274658203125, 0.2142333984375, 0.29638671875, - 0.16015625, 0.007843017578125, 0.007160186767578125, - 0.00508880615234375, 0.054229736328125, 0.048736572265625, - 0.026397705078125, 0.0265350341796875, 0.051361083984375, - 0.018310546875, 0.00974273681640625, 0.0218963623046875, - 0.216064453125, 0.021728515625, 0.0173797607421875, - 0.004520416259765625], "stoks": [52, 52, 52, 52, 52, 41, 41, - 374, 
303, 303, 303, 427], "time": {"begin_ms": 640, - "end_ms": 1140}}]} - - id: 7645a0d1-2e64-410d-83a8-b96040432e9a - chat_id: 470a49f6-1dec-4afe-8b61-035d3b2d63b0 - timestamp: 1716244957031 - role: AGENT - type: AGENT_MESSAGE - message_text: Hello! - emotion_features: >- - {"Admiration": 0.044921875, "Adoration": 0.0253753662109375, - "Aesthetic Appreciation": 0.03265380859375, "Amusement": - 0.118408203125, "Anger": 0.06719970703125, "Anxiety": - 0.0411376953125, "Awe": 0.03802490234375, "Awkwardness": - 0.056549072265625, "Boredom": 0.04217529296875, "Calmness": - 0.08709716796875, "Concentration": 0.070556640625, - "Confusion": 0.06964111328125, "Contemplation": - 0.0343017578125, "Contempt": 0.037689208984375, - "Contentment": 0.059417724609375, "Craving": - 0.01132965087890625, "Desire": 0.01406097412109375, - "Determination": 0.1143798828125, "Disappointment": - 0.051177978515625, "Disgust": 0.028594970703125, "Distress": - 0.054901123046875, "Doubt": 0.04638671875, "Ecstasy": - 0.0258026123046875, "Embarrassment": 0.0222015380859375, - "Empathic Pain": 0.015777587890625, "Entrancement": - 0.0160980224609375, "Envy": 0.0163421630859375, - "Excitement": 0.129638671875, "Fear": 0.03125, "Guilt": - 0.01483917236328125, "Horror": 0.0194549560546875, - "Interest": 0.1341552734375, "Joy": 0.0738525390625, "Love": - 0.0216522216796875, "Nostalgia": 0.0210418701171875, "Pain": - 0.020721435546875, "Pride": 0.05499267578125, "Realization": - 0.0728759765625, "Relief": 0.04052734375, "Romance": - 0.0129241943359375, "Sadness": 0.0254669189453125, - "Satisfaction": 0.07159423828125, "Shame": 0.01495361328125, - "Surprise (negative)": 0.05560302734375, "Surprise - (positive)": 0.07965087890625, "Sympathy": - 0.022247314453125, "Tiredness": 0.0194549560546875, - "Triumph": 0.04107666015625} - metadata: '' - page_number: 0 - page_size: 3 - total_pages: 1 - end_timestamp: 1716244958546 - metadata: '' - config: - id: 1b60e1a0-cc59-424a-8d2c-189d354db3f3 - version: 0 - get-audio: - path: /v0/evi/chats/{id}/audio - method: GET - docs: >- - Fetches the audio of a previous **Chat**. For more details, see our - guide on audio reconstruction - [here](/docs/speech-to-speech-evi/faq#can-i-access-the-audio-of-previous-conversations-with-evi). - source: - openapi: evi-openapi.json - path-parameters: - id: - type: string - docs: Identifier for a chat. Formatted as a UUID. - display-name: Get chat audio - response: - docs: Success - type: root.ReturnChatAudioReconstruction - status-code: 200 - errors: - - root.BadRequestError - examples: - - path-parameters: - id: 470a49f6-1dec-4afe-8b61-035d3b2d63b0 - response: - body: - id: 470a49f6-1dec-4afe-8b61-035d3b2d63b0 - user_id: e6235940-cfda-3988-9147-ff531627cf42 - status: COMPLETE - filename: >- - e6235940-cfda-3988-9147-ff531627cf42/470a49f6-1dec-4afe-8b61-035d3b2d63b0/reconstructed_audio.mp4 - modified_at: 1729875432555 - signed_audio_url: https://storage.googleapis.com/...etc. - signed_url_expiration_timestamp_millis: 1730232816964 - source: - openapi: evi-openapi.json diff --git a/.mock/definition/empathic-voice/configs.yml b/.mock/definition/empathic-voice/configs.yml deleted file mode 100644 index 06995d3c..00000000 --- a/.mock/definition/empathic-voice/configs.yml +++ /dev/null @@ -1,835 +0,0 @@ -imports: - root: __package__.yml -service: - auth: false - base-path: '' - endpoints: - list-configs: - path: /v0/evi/configs - method: GET - docs: >- - Fetches a paginated list of **Configs**. 
- - - For more details on configuration options and how to configure EVI, see - our [configuration guide](/docs/speech-to-speech-evi/configuration). - pagination: - offset: $request.page_number - results: $response.configs_page - source: - openapi: evi-openapi.json - display-name: List configs - request: - name: ConfigsListConfigsRequest - query-parameters: - page_number: - type: optional - default: 0 - docs: >- - Specifies the page number to retrieve, enabling pagination. - - - This parameter uses zero-based indexing. For example, setting - `page_number` to 0 retrieves the first page of results (items 0-9 - if `page_size` is 10), setting `page_number` to 1 retrieves the - second page (items 10-19), and so on. Defaults to 0, which - retrieves the first page. - page_size: - type: optional - docs: >- - Specifies the maximum number of results to include per page, - enabling pagination. The value must be between 1 and 100, - inclusive. - - - For example, if `page_size` is set to 10, each page will include - up to 10 items. Defaults to 10. - restrict_to_most_recent: - type: optional - docs: >- - By default, `restrict_to_most_recent` is set to true, returning - only the latest version of each tool. To include all versions of - each tool in the list, set `restrict_to_most_recent` to false. - name: - type: optional - docs: Filter to only include configs with this name. - response: - docs: Success - type: root.ReturnPagedConfigs - status-code: 200 - errors: - - root.BadRequestError - examples: - - query-parameters: - page_number: 0 - page_size: 1 - response: - body: - page_number: 0 - page_size: 1 - total_pages: 1 - configs_page: - - id: 1b60e1a0-cc59-424a-8d2c-189d354db3f3 - version: 0 - version_description: '' - name: Weather Assistant Config - created_on: 1715267200693 - modified_on: 1715267200693 - evi_version: '3' - prompt: - id: af699d45-2985-42cc-91b9-af9e5da3bac5 - version: 0 - version_type: FIXED - version_description: '' - name: Weather Assistant Prompt - created_on: 1715267200693 - modified_on: 1715267200693 - text: >- - You are an AI weather assistant providing users with - accurate and up-to-date weather information. Respond to - user queries concisely and clearly. Use simple language - and avoid technical jargon. Provide temperature, - precipitation, wind conditions, and any weather alerts. - Include helpful tips if severe weather is expected. - voice: - provider: HUME_AI - name: Ava Song - id: 5bb7de05-c8fe-426a-8fcc-ba4fc4ce9f9c - language_model: - model_provider: ANTHROPIC - model_resource: claude-3-7-sonnet-latest - temperature: 1 - ellm_model: - allow_short_responses: false - tools: [] - builtin_tools: [] - event_messages: - on_new_chat: - enabled: false - text: '' - on_inactivity_timeout: - enabled: false - text: '' - on_max_duration_timeout: - enabled: false - text: '' - timeouts: - inactivity: - enabled: true - duration_secs: 600 - max_duration: - enabled: true - duration_secs: 1800 - create-config: - path: /v0/evi/configs - method: POST - docs: >- - Creates a **Config** which can be applied to EVI. - - - For more details on configuration options and how to configure EVI, see - our [configuration guide](/docs/speech-to-speech-evi/configuration). - source: - openapi: evi-openapi.json - display-name: Create config - request: - name: PostedConfig - body: - properties: - builtin_tools: - type: optional>> - docs: List of built-in tools associated with this Config. - ellm_model: - type: optional - docs: >- - The eLLM setup associated with this Config. 
- - - Hume's eLLM (empathic Large Language Model) is a multimodal - language model that takes into account both expression measures - and language. The eLLM generates short, empathic language - responses and guides text-to-speech (TTS) prosody. - event_messages: optional - evi_version: - type: string - docs: >- - EVI version to use. Only versions `3` and `4-mini` are - supported. - language_model: - type: optional - docs: >- - The supplemental language model associated with this Config. - - - This model is used to generate longer, more detailed responses - from EVI. Choosing an appropriate supplemental language model - for your use case is crucial for generating fast, high-quality - responses from EVI. - name: - type: string - docs: Name applied to all versions of a particular Config. - nudges: - type: optional - docs: >- - Configures nudges, brief audio prompts that can guide - conversations when users pause or need encouragement to continue - speaking. Nudges help create more natural, flowing interactions - by providing gentle conversational cues. - prompt: optional - timeouts: optional - tools: - type: optional>> - docs: List of user-defined tools associated with this Config. - version_description: - type: optional - docs: An optional description of the Config version. - voice: - type: optional - docs: A voice specification associated with this Config. - webhooks: - type: optional>> - docs: Webhook config specifications for each subscriber. - content-type: application/json - response: - docs: Created - type: root.ReturnConfig - status-code: 201 - errors: - - root.BadRequestError - examples: - - request: - name: Weather Assistant Config - prompt: - id: af699d45-2985-42cc-91b9-af9e5da3bac5 - version: 0 - evi_version: '3' - voice: - provider: HUME_AI - name: Ava Song - language_model: - model_provider: ANTHROPIC - model_resource: claude-3-7-sonnet-latest - temperature: 1 - event_messages: - on_new_chat: - enabled: false - text: '' - on_inactivity_timeout: - enabled: false - text: '' - on_max_duration_timeout: - enabled: false - text: '' - response: - body: - id: 1b60e1a0-cc59-424a-8d2c-189d354db3f3 - version: 0 - version_description: '' - name: Weather Assistant Config - created_on: 1715275452390 - modified_on: 1715275452390 - evi_version: '3' - prompt: - id: af699d45-2985-42cc-91b9-af9e5da3bac5 - version: 0 - version_type: FIXED - version_description: '' - name: Weather Assistant Prompt - created_on: 1715267200693 - modified_on: 1715267200693 - text: >- - You are an AI weather assistant providing users with - accurate and up-to-date weather information. Respond to user - queries concisely and clearly. Use simple language and avoid - technical jargon. Provide temperature, precipitation, wind - conditions, and any weather alerts. Include helpful tips if - severe weather is expected. - voice: - provider: HUME_AI - name: Ava Song - id: 5bb7de05-c8fe-426a-8fcc-ba4fc4ce9f9c - language_model: - model_provider: ANTHROPIC - model_resource: claude-3-7-sonnet-latest - temperature: 1 - ellm_model: - allow_short_responses: false - tools: [] - builtin_tools: [] - event_messages: - on_new_chat: - enabled: false - text: '' - on_inactivity_timeout: - enabled: false - text: '' - on_max_duration_timeout: - enabled: false - text: '' - timeouts: - inactivity: - enabled: true - duration_secs: 600 - max_duration: - enabled: true - duration_secs: 1800 - list-config-versions: - path: /v0/evi/configs/{id} - method: GET - docs: >- - Fetches a list of a **Config's** versions. 
- - - For more details on configuration options and how to configure EVI, see - our [configuration guide](/docs/speech-to-speech-evi/configuration). - pagination: - offset: $request.page_number - results: $response.configs_page - source: - openapi: evi-openapi.json - path-parameters: - id: - type: string - docs: Identifier for a Config. Formatted as a UUID. - display-name: List config versions - request: - name: ConfigsListConfigVersionsRequest - query-parameters: - page_number: - type: optional - default: 0 - docs: >- - Specifies the page number to retrieve, enabling pagination. - - - This parameter uses zero-based indexing. For example, setting - `page_number` to 0 retrieves the first page of results (items 0-9 - if `page_size` is 10), setting `page_number` to 1 retrieves the - second page (items 10-19), and so on. Defaults to 0, which - retrieves the first page. - page_size: - type: optional - docs: >- - Specifies the maximum number of results to include per page, - enabling pagination. The value must be between 1 and 100, - inclusive. - - - For example, if `page_size` is set to 10, each page will include - up to 10 items. Defaults to 10. - restrict_to_most_recent: - type: optional - docs: >- - By default, `restrict_to_most_recent` is set to true, returning - only the latest version of each config. To include all versions of - each config in the list, set `restrict_to_most_recent` to false. - response: - docs: Success - type: root.ReturnPagedConfigs - status-code: 200 - errors: - - root.BadRequestError - examples: - - path-parameters: - id: 1b60e1a0-cc59-424a-8d2c-189d354db3f3 - response: - body: - page_number: 0 - page_size: 10 - total_pages: 1 - configs_page: - - id: 1b60e1a0-cc59-424a-8d2c-189d354db3f3 - version: 0 - version_description: '' - name: Weather Assistant Config - created_on: 1715275452390 - modified_on: 1715275452390 - evi_version: '3' - prompt: - id: af699d45-2985-42cc-91b9-af9e5da3bac5 - version: 0 - version_type: FIXED - version_description: '' - name: Weather Assistant Prompt - created_on: 1715267200693 - modified_on: 1715267200693 - text: >- - You are an AI weather assistant providing users with - accurate and up-to-date weather information. Respond to - user queries concisely and clearly. Use simple language - and avoid technical jargon. Provide temperature, - precipitation, wind conditions, and any weather alerts. - Include helpful tips if severe weather is expected. - voice: - provider: HUME_AI - name: Ava Song - id: 5bb7de05-c8fe-426a-8fcc-ba4fc4ce9f9c - language_model: - model_provider: ANTHROPIC - model_resource: claude-3-7-sonnet-latest - temperature: 1 - ellm_model: - allow_short_responses: false - tools: [] - builtin_tools: [] - event_messages: - on_new_chat: - enabled: false - text: '' - on_inactivity_timeout: - enabled: false - text: '' - on_max_duration_timeout: - enabled: false - text: '' - timeouts: - inactivity: - enabled: true - duration_secs: 600 - max_duration: - enabled: true - duration_secs: 1800 - create-config-version: - path: /v0/evi/configs/{id} - method: POST - docs: >- - Updates a **Config** by creating a new version of the **Config**. - - - For more details on configuration options and how to configure EVI, see - our [configuration guide](/docs/speech-to-speech-evi/configuration). - source: - openapi: evi-openapi.json - path-parameters: - id: - type: string - docs: Identifier for a Config. Formatted as a UUID. 
- display-name: Create config version - request: - name: PostedConfigVersion - body: - properties: - builtin_tools: - type: optional>> - docs: List of built-in tools associated with this Config version. - ellm_model: - type: optional - docs: >- - The eLLM setup associated with this Config version. - - - Hume's eLLM (empathic Large Language Model) is a multimodal - language model that takes into account both expression measures - and language. The eLLM generates short, empathic language - responses and guides text-to-speech (TTS) prosody. - event_messages: optional - evi_version: - type: string - docs: The version of the EVI used with this config. - language_model: - type: optional - docs: >- - The supplemental language model associated with this Config - version. - - - This model is used to generate longer, more detailed responses - from EVI. Choosing an appropriate supplemental language model - for your use case is crucial for generating fast, high-quality - responses from EVI. - nudges: optional - prompt: optional - timeouts: optional - tools: - type: optional>> - docs: List of user-defined tools associated with this Config version. - version_description: - type: optional - docs: An optional description of the Config version. - voice: - type: optional - docs: A voice specification associated with this Config version. - webhooks: - type: optional>> - docs: Webhook config specifications for each subscriber. - content-type: application/json - response: - docs: Created - type: root.ReturnConfig - status-code: 201 - errors: - - root.BadRequestError - examples: - - path-parameters: - id: 1b60e1a0-cc59-424a-8d2c-189d354db3f3 - request: - version_description: This is an updated version of the Weather Assistant Config. - evi_version: '3' - prompt: - id: af699d45-2985-42cc-91b9-af9e5da3bac5 - version: 0 - voice: - provider: HUME_AI - name: Ava Song - language_model: - model_provider: ANTHROPIC - model_resource: claude-3-7-sonnet-latest - temperature: 1 - ellm_model: - allow_short_responses: true - event_messages: - on_new_chat: - enabled: false - text: '' - on_inactivity_timeout: - enabled: false - text: '' - on_max_duration_timeout: - enabled: false - text: '' - response: - body: - id: 1b60e1a0-cc59-424a-8d2c-189d354db3f3 - version: 1 - version_description: This is an updated version of the Weather Assistant Config. - name: Weather Assistant Config - created_on: 1715275452390 - modified_on: 1722642242998 - evi_version: '3' - prompt: - id: af699d45-2985-42cc-91b9-af9e5da3bac5 - version: 0 - version_type: FIXED - version_description: '' - name: Weather Assistant Prompt - created_on: 1715267200693 - modified_on: 1715267200693 - text: >- - You are an AI weather assistant providing users with - accurate and up-to-date weather information. Respond to user - queries concisely and clearly. Use simple language and avoid - technical jargon. Provide temperature, precipitation, wind - conditions, and any weather alerts. Include helpful tips if - severe weather is expected. 
- voice: - provider: HUME_AI - name: Ava Song - id: 5bb7de05-c8fe-426a-8fcc-ba4fc4ce9f9c - language_model: - model_provider: ANTHROPIC - model_resource: claude-3-7-sonnet-latest - temperature: 1 - ellm_model: - allow_short_responses: true - tools: [] - builtin_tools: [] - event_messages: - on_new_chat: - enabled: false - text: '' - on_inactivity_timeout: - enabled: false - text: '' - on_max_duration_timeout: - enabled: false - text: '' - timeouts: - inactivity: - enabled: true - duration_secs: 600 - max_duration: - enabled: true - duration_secs: 1800 - delete-config: - path: /v0/evi/configs/{id} - method: DELETE - docs: >- - Deletes a **Config** and its versions. - - - For more details on configuration options and how to configure EVI, see - our [configuration guide](/docs/speech-to-speech-evi/configuration). - source: - openapi: evi-openapi.json - path-parameters: - id: - type: string - docs: Identifier for a Config. Formatted as a UUID. - display-name: Delete config - errors: - - root.BadRequestError - examples: - - path-parameters: - id: 1b60e1a0-cc59-424a-8d2c-189d354db3f3 - update-config-name: - path: /v0/evi/configs/{id} - method: PATCH - docs: >- - Updates the name of a **Config**. - - - For more details on configuration options and how to configure EVI, see - our [configuration guide](/docs/speech-to-speech-evi/configuration). - source: - openapi: evi-openapi.json - path-parameters: - id: - type: string - docs: Identifier for a Config. Formatted as a UUID. - display-name: Update config name - request: - name: PostedConfigName - body: - properties: - name: - type: string - docs: Name applied to all versions of a particular Config. - content-type: application/json - response: - docs: Success - type: text - status-code: 200 - errors: - - root.BadRequestError - examples: - - path-parameters: - id: 1b60e1a0-cc59-424a-8d2c-189d354db3f3 - request: - name: Updated Weather Assistant Config Name - get-config-version: - path: /v0/evi/configs/{id}/version/{version} - method: GET - docs: >- - Fetches a specified version of a **Config**. - - - For more details on configuration options and how to configure EVI, see - our [configuration guide](/docs/speech-to-speech-evi/configuration). - source: - openapi: evi-openapi.json - path-parameters: - id: - type: string - docs: Identifier for a Config. Formatted as a UUID. - version: - type: integer - docs: >- - Version number for a Config. - - - Configs, Prompts, Custom Voices, and Tools are versioned. This - versioning system supports iterative development, allowing you to - progressively refine configurations and revert to previous versions - if needed. - - - Version numbers are integer values representing different iterations - of the Config. Each update to the Config increments its version - number. - display-name: Get config version - response: - docs: Success - type: root.ReturnConfig - status-code: 200 - errors: - - root.BadRequestError - examples: - - path-parameters: - id: 1b60e1a0-cc59-424a-8d2c-189d354db3f3 - version: 1 - response: - body: - id: 1b60e1a0-cc59-424a-8d2c-189d354db3f3 - version: 1 - version_description: '' - name: Weather Assistant Config - created_on: 1715275452390 - modified_on: 1715275452390 - evi_version: '3' - prompt: - id: af699d45-2985-42cc-91b9-af9e5da3bac5 - version: 0 - version_type: FIXED - version_description: '' - name: Weather Assistant Prompt - created_on: 1715267200693 - modified_on: 1715267200693 - text: >- - You are an AI weather assistant providing users with - accurate and up-to-date weather information. 
Respond to user - queries concisely and clearly. Use simple language and avoid - technical jargon. Provide temperature, precipitation, wind - conditions, and any weather alerts. Include helpful tips if - severe weather is expected. - voice: - provider: HUME_AI - name: Ava Song - id: 5bb7de05-c8fe-426a-8fcc-ba4fc4ce9f9c - language_model: - model_provider: ANTHROPIC - model_resource: claude-3-7-sonnet-latest - temperature: 1 - ellm_model: - allow_short_responses: false - tools: [] - builtin_tools: [] - event_messages: - on_new_chat: - enabled: false - text: '' - on_inactivity_timeout: - enabled: false - text: '' - on_max_duration_timeout: - enabled: false - text: '' - timeouts: - inactivity: - enabled: true - duration_secs: 600 - max_duration: - enabled: true - duration_secs: 1800 - delete-config-version: - path: /v0/evi/configs/{id}/version/{version} - method: DELETE - docs: >- - Deletes a specified version of a **Config**. - - - For more details on configuration options and how to configure EVI, see - our [configuration guide](/docs/speech-to-speech-evi/configuration). - source: - openapi: evi-openapi.json - path-parameters: - id: - type: string - docs: Identifier for a Config. Formatted as a UUID. - version: - type: integer - docs: >- - Version number for a Config. - - - Configs, Prompts, Custom Voices, and Tools are versioned. This - versioning system supports iterative development, allowing you to - progressively refine configurations and revert to previous versions - if needed. - - - Version numbers are integer values representing different iterations - of the Config. Each update to the Config increments its version - number. - display-name: Delete config version - errors: - - root.BadRequestError - examples: - - path-parameters: - id: 1b60e1a0-cc59-424a-8d2c-189d354db3f3 - version: 1 - update-config-description: - path: /v0/evi/configs/{id}/version/{version} - method: PATCH - docs: >- - Updates the description of a **Config**. - - - For more details on configuration options and how to configure EVI, see - our [configuration guide](/docs/speech-to-speech-evi/configuration). - source: - openapi: evi-openapi.json - path-parameters: - id: - type: string - docs: Identifier for a Config. Formatted as a UUID. - version: - type: integer - docs: >- - Version number for a Config. - - - Configs, Prompts, Custom Voices, and Tools are versioned. This - versioning system supports iterative development, allowing you to - progressively refine configurations and revert to previous versions - if needed. - - - Version numbers are integer values representing different iterations - of the Config. Each update to the Config increments its version - number. - display-name: Update config description - request: - name: PostedConfigVersionDescription - body: - properties: - version_description: - type: optional - docs: An optional description of the Config version. - content-type: application/json - response: - docs: Success - type: root.ReturnConfig - status-code: 200 - errors: - - root.BadRequestError - examples: - - path-parameters: - id: 1b60e1a0-cc59-424a-8d2c-189d354db3f3 - version: 1 - request: - version_description: This is an updated version_description. - response: - body: - id: 1b60e1a0-cc59-424a-8d2c-189d354db3f3 - version: 1 - version_description: This is an updated version_description. 
- name: Weather Assistant Config - created_on: 1715275452390 - modified_on: 1715275452390 - evi_version: '3' - prompt: - id: af699d45-2985-42cc-91b9-af9e5da3bac5 - version: 0 - version_type: FIXED - version_description: '' - name: Weather Assistant Prompt - created_on: 1715267200693 - modified_on: 1715267200693 - text: >- - You are an AI weather assistant providing users with - accurate and up-to-date weather information. Respond to user - queries concisely and clearly. Use simple language and avoid - technical jargon. Provide temperature, precipitation, wind - conditions, and any weather alerts. Include helpful tips if - severe weather is expected. - voice: - provider: HUME_AI - name: Ava Song - id: 5bb7de05-c8fe-426a-8fcc-ba4fc4ce9f9c - language_model: - model_provider: ANTHROPIC - model_resource: claude-3-7-sonnet-latest - temperature: 1 - ellm_model: - allow_short_responses: false - tools: [] - builtin_tools: [] - event_messages: - on_new_chat: - enabled: false - text: '' - on_inactivity_timeout: - enabled: false - text: '' - on_max_duration_timeout: - enabled: false - text: '' - timeouts: - inactivity: - enabled: true - duration_secs: 600 - max_duration: - enabled: true - duration_secs: 1800 - source: - openapi: evi-openapi.json diff --git a/.mock/definition/empathic-voice/controlPlane.yml b/.mock/definition/empathic-voice/controlPlane.yml deleted file mode 100644 index 83d760a1..00000000 --- a/.mock/definition/empathic-voice/controlPlane.yml +++ /dev/null @@ -1,72 +0,0 @@ -imports: - root: __package__.yml -service: - auth: false - base-path: '' - endpoints: - send: - path: /v0/evi/chat/{chat_id}/send - method: POST - docs: Send a message to a specific chat. - source: - openapi: evi-openapi.json - path-parameters: - chat_id: string - display-name: Send Message - request: - body: root.ControlPlanePublishEvent - content-type: application/json - errors: - - root.UnprocessableEntityError - examples: - - path-parameters: - chat_id: chat_id - request: - type: session_settings - source: - openapi: evi-openapi.json -channel: - path: /chat/{chat_id}/connect - url: evi - auth: false - display-name: Control Plane - docs: >- - Connects to an in-progress EVI chat session. The original chat must have - been started with `allow_connection=true`. The connection can be used to - send and receive the same messages as the original chat, with the exception - that `audio_input` messages are not allowed. - path-parameters: - chat_id: - type: string - docs: The ID of the chat to connect to. - query-parameters: - access_token: - type: optional - default: '' - docs: >- - Access token used for authenticating the client. If not provided, an - `api_key` must be provided to authenticate. - - - The access token is generated using both an API key and a Secret key, - which provides an additional layer of security compared to using just an - API key. - - - For more details, refer to the [Authentication Strategies - Guide](/docs/introduction/api-key#authentication-strategies). 
- messages: - publish: - origin: client - body: root.ControlPlanePublishEvent - subscribe: - origin: server - body: root.SubscribeEvent - examples: - - messages: - - type: publish - body: - type: session_settings - - type: subscribe - body: - type: assistant_end diff --git a/.mock/definition/empathic-voice/prompts.yml b/.mock/definition/empathic-voice/prompts.yml deleted file mode 100644 index 10898960..00000000 --- a/.mock/definition/empathic-voice/prompts.yml +++ /dev/null @@ -1,549 +0,0 @@ -imports: - root: __package__.yml -service: - auth: false - base-path: '' - endpoints: - list-prompts: - path: /v0/evi/prompts - method: GET - docs: >- - Fetches a paginated list of **Prompts**. - - - See our [prompting - guide](/docs/speech-to-speech-evi/guides/phone-calling) for tips on - crafting your system prompt. - pagination: - offset: $request.page_number - results: $response.prompts_page - source: - openapi: evi-openapi.json - display-name: List prompts - request: - name: PromptsListPromptsRequest - query-parameters: - page_number: - type: optional - default: 0 - docs: >- - Specifies the page number to retrieve, enabling pagination. - - - This parameter uses zero-based indexing. For example, setting - `page_number` to 0 retrieves the first page of results (items 0-9 - if `page_size` is 10), setting `page_number` to 1 retrieves the - second page (items 10-19), and so on. Defaults to 0, which - retrieves the first page. - page_size: - type: optional - docs: >- - Specifies the maximum number of results to include per page, - enabling pagination. The value must be between 1 and 100, - inclusive. - - - For example, if `page_size` is set to 10, each page will include - up to 10 items. Defaults to 10. - restrict_to_most_recent: - type: optional - docs: Only include the most recent version of each prompt in the list. - name: - type: optional - docs: Filter to only include prompts with name. - response: - docs: Success - type: root.ReturnPagedPrompts - status-code: 200 - errors: - - root.BadRequestError - examples: - - query-parameters: - page_number: 0 - page_size: 2 - response: - body: - page_number: 0 - page_size: 2 - total_pages: 1 - prompts_page: - - id: af699d45-2985-42cc-91b9-af9e5da3bac5 - version: 0 - version_type: FIXED - version_description: '' - name: Weather Assistant Prompt - created_on: 1715267200693 - modified_on: 1715267200693 - text: >- - You are an AI weather assistant providing users with - accurate and up-to-date weather information. Respond to user - queries concisely and clearly. Use simple language and avoid - technical jargon. Provide temperature, precipitation, wind - conditions, and any weather alerts. Include helpful tips if - severe weather is expected. - - id: 616b2b4c-a096-4445-9c23-64058b564fc2 - version: 0 - version_type: FIXED - version_description: '' - name: Web Search Assistant Prompt - created_on: 1715267200693 - modified_on: 1715267200693 - text: >- - You are an AI web search assistant designed to help - users find accurate and relevant information on the web. - Respond to user queries promptly, using the built-in web - search tool to retrieve up-to-date results. Present - information clearly and concisely, summarizing key points - where necessary. Use simple language and avoid technical - jargon. If needed, provide helpful tips for refining search - queries to obtain better results. 
- create-prompt: - path: /v0/evi/prompts - method: POST - docs: >- - Creates a **Prompt** that can be added to an [EVI - configuration](/reference/speech-to-speech-evi/configs/create-config). - - - See our [prompting - guide](/docs/speech-to-speech-evi/guides/phone-calling) for tips on - crafting your system prompt. - source: - openapi: evi-openapi.json - display-name: Create prompt - request: - name: PostedPrompt - body: - properties: - name: - type: string - docs: Name applied to all versions of a particular Prompt. - text: - type: string - docs: >- - Instructions used to shape EVI's behavior, responses, and style. - - - You can use the Prompt to define a specific goal or role for - EVI, specifying how it should act or what it should focus on - during the conversation. For example, EVI can be instructed to - act as a customer support representative, a fitness coach, or a - travel advisor, each with its own set of behaviors and response - styles. - - - For help writing a system prompt, see our [Prompting - Guide](/docs/speech-to-speech-evi/guides/prompting). - version_description: - type: optional - docs: An optional description of the Prompt version. - content-type: application/json - response: - docs: Created - type: optional - status-code: 201 - errors: - - root.BadRequestError - examples: - - request: - name: Weather Assistant Prompt - text: >- - You are an AI weather assistant providing users with - accurate and up-to-date weather information. Respond to user - queries concisely and clearly. Use simple language and avoid - technical jargon. Provide temperature, precipitation, wind - conditions, and any weather alerts. Include helpful tips if severe - weather is expected. - response: - body: - id: af699d45-2985-42cc-91b9-af9e5da3bac5 - version: 0 - version_type: FIXED - version_description: null - name: Weather Assistant Prompt - created_on: 1722633247488 - modified_on: 1722633247488 - text: >- - You are an AI weather assistant providing users with - accurate and up-to-date weather information. Respond to user - queries concisely and clearly. Use simple language and avoid - technical jargon. Provide temperature, precipitation, wind - conditions, and any weather alerts. Include helpful tips if - severe weather is expected. - list-prompt-versions: - path: /v0/evi/prompts/{id} - method: GET - docs: >- - Fetches a list of a **Prompt's** versions. - - - See our [prompting - guide](/docs/speech-to-speech-evi/guides/phone-calling) for tips on - crafting your system prompt. - source: - openapi: evi-openapi.json - path-parameters: - id: - type: string - docs: Identifier for a Prompt. Formatted as a UUID. - display-name: List prompt versions - request: - name: PromptsListPromptVersionsRequest - query-parameters: - page_number: - type: optional - default: 0 - docs: >- - Specifies the page number to retrieve, enabling pagination. - - - This parameter uses zero-based indexing. For example, setting - `page_number` to 0 retrieves the first page of results (items 0-9 - if `page_size` is 10), setting `page_number` to 1 retrieves the - second page (items 10-19), and so on. Defaults to 0, which - retrieves the first page. - page_size: - type: optional - docs: >- - Specifies the maximum number of results to include per page, - enabling pagination. The value must be between 1 and 100, - inclusive. - - - For example, if `page_size` is set to 10, each page will include - up to 10 items. Defaults to 10. 
- restrict_to_most_recent: - type: optional - docs: >- - By default, `restrict_to_most_recent` is set to true, returning - only the latest version of each prompt. To include all versions of - each prompt in the list, set `restrict_to_most_recent` to false. - response: - docs: Success - type: root.ReturnPagedPrompts - status-code: 200 - errors: - - root.BadRequestError - examples: - - path-parameters: - id: af699d45-2985-42cc-91b9-af9e5da3bac5 - response: - body: - page_number: 0 - page_size: 10 - total_pages: 1 - prompts_page: - - id: af699d45-2985-42cc-91b9-af9e5da3bac5 - version: 0 - version_type: FIXED - version_description: '' - name: Weather Assistant Prompt - created_on: 1722633247488 - modified_on: 1722633247488 - text: >- - You are an AI weather assistant providing users with - accurate and up-to-date weather information. Respond to user - queries concisely and clearly. Use simple language and avoid - technical jargon. Provide temperature, precipitation, wind - conditions, and any weather alerts. Include helpful tips if - severe weather is expected. - create-prompt-version: - path: /v0/evi/prompts/{id} - method: POST - docs: >- - Updates a **Prompt** by creating a new version of the **Prompt**. - - - See our [prompting - guide](/docs/speech-to-speech-evi/guides/phone-calling) for tips on - crafting your system prompt. - source: - openapi: evi-openapi.json - path-parameters: - id: - type: string - docs: Identifier for a Prompt. Formatted as a UUID. - display-name: Create prompt version - request: - name: PostedPromptVersion - body: - properties: - text: - type: string - docs: >- - Instructions used to shape EVI's behavior, responses, and style - for this version of the Prompt. - - - You can use the Prompt to define a specific goal or role for - EVI, specifying how it should act or what it should focus on - during the conversation. For example, EVI can be instructed to - act as a customer support representative, a fitness coach, or a - travel advisor, each with its own set of behaviors and response - styles. - - - For help writing a system prompt, see our [Prompting - Guide](/docs/speech-to-speech-evi/guides/prompting). - version_description: - type: optional - docs: An optional description of the Prompt version. - content-type: application/json - response: - docs: Created - type: optional - status-code: 201 - errors: - - root.BadRequestError - examples: - - path-parameters: - id: af699d45-2985-42cc-91b9-af9e5da3bac5 - request: - text: >- - You are an updated version of an AI weather assistant - providing users with accurate and up-to-date weather information. - Respond to user queries concisely and clearly. Use simple language - and avoid technical jargon. Provide temperature, precipitation, - wind conditions, and any weather alerts. Include helpful tips if - severe weather is expected. - version_description: This is an updated version of the Weather Assistant Prompt. - response: - body: - id: af699d45-2985-42cc-91b9-af9e5da3bac5 - version: 1 - version_type: FIXED - version_description: This is an updated version of the Weather Assistant Prompt. - name: Weather Assistant Prompt - created_on: 1722633247488 - modified_on: 1722635140150 - text: >- - You are an updated version of an AI weather assistant - providing users with accurate and up-to-date weather - information. Respond to user queries concisely and clearly. Use - simple language and avoid technical jargon. Provide temperature, - precipitation, wind conditions, and any weather alerts. 
Include - helpful tips if severe weather is expected. - delete-prompt: - path: /v0/evi/prompts/{id} - method: DELETE - docs: >- - Deletes a **Prompt** and its versions. - - - See our [prompting - guide](/docs/speech-to-speech-evi/guides/phone-calling) for tips on - crafting your system prompt. - source: - openapi: evi-openapi.json - path-parameters: - id: - type: string - docs: Identifier for a Prompt. Formatted as a UUID. - display-name: Delete prompt - errors: - - root.BadRequestError - examples: - - path-parameters: - id: af699d45-2985-42cc-91b9-af9e5da3bac5 - update-prompt-name: - path: /v0/evi/prompts/{id} - method: PATCH - docs: >- - Updates the name of a **Prompt**. - - - See our [prompting - guide](/docs/speech-to-speech-evi/guides/phone-calling) for tips on - crafting your system prompt. - source: - openapi: evi-openapi.json - path-parameters: - id: - type: string - docs: Identifier for a Prompt. Formatted as a UUID. - display-name: Update prompt name - request: - name: PostedPromptName - body: - properties: - name: - type: string - docs: Name applied to all versions of a particular Prompt. - content-type: application/json - response: - docs: Success - type: text - status-code: 200 - errors: - - root.BadRequestError - examples: - - path-parameters: - id: af699d45-2985-42cc-91b9-af9e5da3bac5 - request: - name: Updated Weather Assistant Prompt Name - get-prompt-version: - path: /v0/evi/prompts/{id}/version/{version} - method: GET - docs: >- - Fetches a specified version of a **Prompt**. - - - See our [prompting - guide](/docs/speech-to-speech-evi/guides/phone-calling) for tips on - crafting your system prompt. - source: - openapi: evi-openapi.json - path-parameters: - id: - type: string - docs: Identifier for a Prompt. Formatted as a UUID. - version: - type: integer - docs: >- - Version number for a Prompt. - - - Prompts, Configs, Custom Voices, and Tools are versioned. This - versioning system supports iterative development, allowing you to - progressively refine prompts and revert to previous versions if - needed. - - - Version numbers are integer values representing different iterations - of the Prompt. Each update to the Prompt increments its version - number. - display-name: Get prompt version - response: - docs: Success - type: optional - status-code: 200 - errors: - - root.BadRequestError - examples: - - path-parameters: - id: af699d45-2985-42cc-91b9-af9e5da3bac5 - version: 0 - response: - body: - id: af699d45-2985-42cc-91b9-af9e5da3bac5 - version: 0 - version_type: FIXED - version_description: '' - name: Weather Assistant Prompt - created_on: 1722633247488 - modified_on: 1722633247488 - text: >- - You are an AI weather assistant providing users with - accurate and up-to-date weather information. Respond to user - queries concisely and clearly. Use simple language and avoid - technical jargon. Provide temperature, precipitation, wind - conditions, and any weather alerts. Include helpful tips if - severe weather is expected. - delete-prompt-version: - path: /v0/evi/prompts/{id}/version/{version} - method: DELETE - docs: >- - Deletes a specified version of a **Prompt**. - - - See our [prompting - guide](/docs/speech-to-speech-evi/guides/phone-calling) for tips on - crafting your system prompt. - source: - openapi: evi-openapi.json - path-parameters: - id: - type: string - docs: Identifier for a Prompt. Formatted as a UUID. - version: - type: integer - docs: >- - Version number for a Prompt. - - - Prompts, Configs, Custom Voices, and Tools are versioned. 
This - versioning system supports iterative development, allowing you to - progressively refine prompts and revert to previous versions if - needed. - - - Version numbers are integer values representing different iterations - of the Prompt. Each update to the Prompt increments its version - number. - display-name: Delete prompt version - errors: - - root.BadRequestError - examples: - - path-parameters: - id: af699d45-2985-42cc-91b9-af9e5da3bac5 - version: 1 - update-prompt-description: - path: /v0/evi/prompts/{id}/version/{version} - method: PATCH - docs: >- - Updates the description of a **Prompt**. - - - See our [prompting - guide](/docs/speech-to-speech-evi/guides/phone-calling) for tips on - crafting your system prompt. - source: - openapi: evi-openapi.json - path-parameters: - id: - type: string - docs: Identifier for a Prompt. Formatted as a UUID. - version: - type: integer - docs: >- - Version number for a Prompt. - - - Prompts, Configs, Custom Voices, and Tools are versioned. This - versioning system supports iterative development, allowing you to - progressively refine prompts and revert to previous versions if - needed. - - - Version numbers are integer values representing different iterations - of the Prompt. Each update to the Prompt increments its version - number. - display-name: Update prompt description - request: - name: PostedPromptVersionDescription - body: - properties: - version_description: - type: optional - docs: An optional description of the Prompt version. - content-type: application/json - response: - docs: Success - type: optional - status-code: 200 - errors: - - root.BadRequestError - examples: - - path-parameters: - id: af699d45-2985-42cc-91b9-af9e5da3bac5 - version: 1 - request: - version_description: This is an updated version_description. - response: - body: - id: af699d45-2985-42cc-91b9-af9e5da3bac5 - version: 1 - version_type: FIXED - version_description: This is an updated version_description. - name: string - created_on: 1722633247488 - modified_on: 1722634770585 - text: >- - You are an AI weather assistant providing users with - accurate and up-to-date weather information. Respond to user - queries concisely and clearly. Use simple language and avoid - technical jargon. Provide temperature, precipitation, wind - conditions, and any weather alerts. Include helpful tips if - severe weather is expected. - source: - openapi: evi-openapi.json diff --git a/.mock/definition/empathic-voice/tools.yml b/.mock/definition/empathic-voice/tools.yml deleted file mode 100644 index 646e997e..00000000 --- a/.mock/definition/empathic-voice/tools.yml +++ /dev/null @@ -1,617 +0,0 @@ -imports: - root: __package__.yml -service: - auth: false - base-path: '' - endpoints: - list-tools: - path: /v0/evi/tools - method: GET - docs: >- - Fetches a paginated list of **Tools**. - - - Refer to our [tool - use](/docs/speech-to-speech-evi/features/tool-use#function-calling) - guide for comprehensive instructions on defining and integrating tools - into EVI. - pagination: - offset: $request.page_number - results: $response.tools_page - source: - openapi: evi-openapi.json - display-name: List tools - request: - name: ToolsListToolsRequest - query-parameters: - page_number: - type: optional - default: 0 - docs: >- - Specifies the page number to retrieve, enabling pagination. - - - This parameter uses zero-based indexing. 
For example, setting - `page_number` to 0 retrieves the first page of results (items 0-9 - if `page_size` is 10), setting `page_number` to 1 retrieves the - second page (items 10-19), and so on. Defaults to 0, which - retrieves the first page. - page_size: - type: optional - docs: >- - Specifies the maximum number of results to include per page, - enabling pagination. The value must be between 1 and 100, - inclusive. - - - For example, if `page_size` is set to 10, each page will include - up to 10 items. Defaults to 10. - restrict_to_most_recent: - type: optional - docs: >- - By default, `restrict_to_most_recent` is set to true, returning - only the latest version of each tool. To include all versions of - each tool in the list, set `restrict_to_most_recent` to false. - name: - type: optional - docs: Filter to only include tools with name. - response: - docs: Success - type: root.ReturnPagedUserDefinedTools - status-code: 200 - errors: - - root.BadRequestError - examples: - - query-parameters: - page_number: 0 - page_size: 2 - response: - body: - page_number: 0 - page_size: 2 - total_pages: 1 - tools_page: - - tool_type: FUNCTION - id: d20827af-5d8d-4f66-b6b9-ce2e3e1ea2b2 - version: 0 - version_type: FIXED - version_description: Fetches user's current location. - name: get_current_location - created_on: 1715267200693 - modified_on: 1715267200693 - fallback_content: Unable to fetch location. - description: Fetches user's current location. - parameters: >- - { "type": "object", "properties": { "location": { "type": - "string", "description": "The city and state, e.g. San - Francisco, CA" }}, "required": ["location"] } - - tool_type: FUNCTION - id: 4442f3ea-9038-40e3-a2ce-1522b7de770f - version: 0 - version_type: FIXED - version_description: >- - Fetches current weather and uses celsius or fahrenheit based - on location of user. - name: get_current_weather - created_on: 1715266126705 - modified_on: 1715266126705 - fallback_content: Unable to fetch location. - description: >- - Fetches current weather and uses celsius or fahrenheit based - on location of user. - parameters: >- - { "type": "object", "properties": { "location": { "type": - "string", "description": "The city and state, e.g. San - Francisco, CA" }, "format": { "type": "string", "enum": - ["celsius", "fahrenheit"], "description": "The temperature - unit to use. Infer this from the users location." } }, - "required": ["location", "format"] } - create-tool: - path: /v0/evi/tools - method: POST - docs: >- - Creates a **Tool** that can be added to an [EVI - configuration](/reference/speech-to-speech-evi/configs/create-config). - - - Refer to our [tool - use](/docs/speech-to-speech-evi/features/tool-use#function-calling) - guide for comprehensive instructions on defining and integrating tools - into EVI. - source: - openapi: evi-openapi.json - display-name: Create tool - request: - name: PostedUserDefinedTool - body: - properties: - description: - type: optional - docs: >- - An optional description of what the Tool does, used by the - supplemental LLM to choose when and how to call the function. - fallback_content: - type: optional - docs: >- - Optional text passed to the supplemental LLM in place of the - tool call result. The LLM then uses this text to generate a - response back to the user, ensuring continuity in the - conversation if the Tool errors. - name: - type: string - docs: Name applied to all versions of a particular Tool. 
- parameters: - type: string - docs: >- - Stringified JSON defining the parameters used by this version of - the Tool. - - - These parameters define the inputs needed for the Tool's - execution, including the expected data type and description for - each input field. Structured as a stringified JSON schema, this - format ensures the Tool receives data in the expected format. - version_description: - type: optional - docs: An optional description of the Tool version. - content-type: application/json - response: - docs: Created - type: optional - status-code: 201 - errors: - - root.BadRequestError - examples: - - request: - name: get_current_weather - parameters: >- - { "type": "object", "properties": { "location": { "type": - "string", "description": "The city and state, e.g. San Francisco, - CA" }, "format": { "type": "string", "enum": ["celsius", - "fahrenheit"], "description": "The temperature unit to use. Infer - this from the users location." } }, "required": ["location", - "format"] } - version_description: >- - Fetches current weather and uses celsius or fahrenheit based on - location of user. - description: This tool is for getting the current weather. - fallback_content: Unable to fetch current weather. - response: - body: - tool_type: FUNCTION - id: aa9b71c4-723c-47ff-9f83-1a1829e74376 - version: 0 - version_type: FIXED - version_description: >- - Fetches current weather and uses celsius or fahrenheit based on - location of user. - name: get_current_weather - created_on: 1715275452390 - modified_on: 1715275452390 - fallback_content: Unable to fetch current weather. - description: This tool is for getting the current weather. - parameters: >- - { "type": "object", "properties": { "location": { "type": - "string", "description": "The city and state, e.g. San - Francisco, CA" }, "format": { "type": "string", "enum": - ["celsius", "fahrenheit"], "description": "The temperature unit - to use. Infer this from the users location." } }, "required": - ["location", "format"] } - list-tool-versions: - path: /v0/evi/tools/{id} - method: GET - docs: >- - Fetches a list of a **Tool's** versions. - - - Refer to our [tool - use](/docs/speech-to-speech-evi/features/tool-use#function-calling) - guide for comprehensive instructions on defining and integrating tools - into EVI. - pagination: - offset: $request.page_number - results: $response.tools_page - source: - openapi: evi-openapi.json - path-parameters: - id: - type: string - docs: Identifier for a Tool. Formatted as a UUID. - display-name: List tool versions - request: - name: ToolsListToolVersionsRequest - query-parameters: - page_number: - type: optional - default: 0 - docs: >- - Specifies the page number to retrieve, enabling pagination. - - - This parameter uses zero-based indexing. For example, setting - `page_number` to 0 retrieves the first page of results (items 0-9 - if `page_size` is 10), setting `page_number` to 1 retrieves the - second page (items 10-19), and so on. Defaults to 0, which - retrieves the first page. - page_size: - type: optional - docs: >- - Specifies the maximum number of results to include per page, - enabling pagination. The value must be between 1 and 100, - inclusive. - - - For example, if `page_size` is set to 10, each page will include - up to 10 items. Defaults to 10. - restrict_to_most_recent: - type: optional - docs: >- - By default, `restrict_to_most_recent` is set to true, returning - only the latest version of each tool. 
To include all versions of - each tool in the list, set `restrict_to_most_recent` to false. - response: - docs: Success - type: root.ReturnPagedUserDefinedTools - status-code: 200 - errors: - - root.BadRequestError - examples: - - path-parameters: - id: 00183a3f-79ba-413d-9f3b-609864268bea - response: - body: - page_number: 0 - page_size: 10 - total_pages: 1 - tools_page: - - tool_type: FUNCTION - id: 00183a3f-79ba-413d-9f3b-609864268bea - version: 1 - version_type: FIXED - version_description: >- - Fetches current weather and uses celsius, fahrenheit, or - kelvin based on location of user. - name: get_current_weather - created_on: 1715277014228 - modified_on: 1715277602313 - fallback_content: Unable to fetch current weather. - description: This tool is for getting the current weather. - parameters: >- - { "type": "object", "properties": { "location": { "type": - "string", "description": "The city and state, e.g. San - Francisco, CA" }, "format": { "type": "string", "enum": - ["celsius", "fahrenheit", "kelvin"], "description": "The - temperature unit to use. Infer this from the users - location." } }, "required": ["location", "format"] } - create-tool-version: - path: /v0/evi/tools/{id} - method: POST - docs: >- - Updates a **Tool** by creating a new version of the **Tool**. - - - Refer to our [tool - use](/docs/speech-to-speech-evi/features/tool-use#function-calling) - guide for comprehensive instructions on defining and integrating tools - into EVI. - source: - openapi: evi-openapi.json - path-parameters: - id: - type: string - docs: Identifier for a Tool. Formatted as a UUID. - display-name: Create tool version - request: - name: PostedUserDefinedToolVersion - body: - properties: - description: - type: optional - docs: >- - An optional description of what the Tool does, used by the - supplemental LLM to choose when and how to call the function. - fallback_content: - type: optional - docs: >- - Optional text passed to the supplemental LLM in place of the - tool call result. The LLM then uses this text to generate a - response back to the user, ensuring continuity in the - conversation if the Tool errors. - parameters: - type: string - docs: >- - Stringified JSON defining the parameters used by this version of - the Tool. - - - These parameters define the inputs needed for the Tool's - execution, including the expected data type and description for - each input field. Structured as a stringified JSON schema, this - format ensures the Tool receives data in the expected format. - version_description: - type: optional - docs: An optional description of the Tool version. - content-type: application/json - response: - docs: Created - type: optional - status-code: 201 - errors: - - root.BadRequestError - examples: - - path-parameters: - id: 00183a3f-79ba-413d-9f3b-609864268bea - request: - parameters: >- - { "type": "object", "properties": { "location": { "type": - "string", "description": "The city and state, e.g. San Francisco, - CA" }, "format": { "type": "string", "enum": ["celsius", - "fahrenheit", "kelvin"], "description": "The temperature unit to - use. Infer this from the users location." } }, "required": - ["location", "format"] } - version_description: >- - Fetches current weather and uses celsius, fahrenheit, or kelvin - based on location of user. - fallback_content: Unable to fetch current weather. - description: This tool is for getting the current weather. 
- response: - body: - tool_type: FUNCTION - id: 00183a3f-79ba-413d-9f3b-609864268bea - version: 1 - version_type: FIXED - version_description: >- - Fetches current weather and uses celsius, fahrenheit, or kelvin - based on location of user. - name: get_current_weather - created_on: 1715277014228 - modified_on: 1715277602313 - fallback_content: Unable to fetch current weather. - description: This tool is for getting the current weather. - parameters: >- - { "type": "object", "properties": { "location": { "type": - "string", "description": "The city and state, e.g. San - Francisco, CA" }, "format": { "type": "string", "enum": - ["celsius", "fahrenheit", "kelvin"], "description": "The - temperature unit to use. Infer this from the users location." } - }, "required": ["location", "format"] } - delete-tool: - path: /v0/evi/tools/{id} - method: DELETE - docs: >- - Deletes a **Tool** and its versions. - - - Refer to our [tool - use](/docs/speech-to-speech-evi/features/tool-use#function-calling) - guide for comprehensive instructions on defining and integrating tools - into EVI. - source: - openapi: evi-openapi.json - path-parameters: - id: - type: string - docs: Identifier for a Tool. Formatted as a UUID. - display-name: Delete tool - errors: - - root.BadRequestError - examples: - - path-parameters: - id: 00183a3f-79ba-413d-9f3b-609864268bea - update-tool-name: - path: /v0/evi/tools/{id} - method: PATCH - docs: >- - Updates the name of a **Tool**. - - - Refer to our [tool - use](/docs/speech-to-speech-evi/features/tool-use#function-calling) - guide for comprehensive instructions on defining and integrating tools - into EVI. - source: - openapi: evi-openapi.json - path-parameters: - id: - type: string - docs: Identifier for a Tool. Formatted as a UUID. - display-name: Update tool name - request: - name: PostedUserDefinedToolName - body: - properties: - name: - type: string - docs: Name applied to all versions of a particular Tool. - content-type: application/json - response: - docs: Success - type: text - status-code: 200 - errors: - - root.BadRequestError - examples: - - path-parameters: - id: 00183a3f-79ba-413d-9f3b-609864268bea - request: - name: get_current_temperature - get-tool-version: - path: /v0/evi/tools/{id}/version/{version} - method: GET - docs: >- - Fetches a specified version of a **Tool**. - - - Refer to our [tool - use](/docs/speech-to-speech-evi/features/tool-use#function-calling) - guide for comprehensive instructions on defining and integrating tools - into EVI. - source: - openapi: evi-openapi.json - path-parameters: - id: - type: string - docs: Identifier for a Tool. Formatted as a UUID. - version: - type: integer - docs: >- - Version number for a Tool. - - - Tools, Configs, Custom Voices, and Prompts are versioned. This - versioning system supports iterative development, allowing you to - progressively refine tools and revert to previous versions if - needed. - - - Version numbers are integer values representing different iterations - of the Tool. Each update to the Tool increments its version number. - display-name: Get tool version - response: - docs: Success - type: optional - status-code: 200 - errors: - - root.BadRequestError - examples: - - path-parameters: - id: 00183a3f-79ba-413d-9f3b-609864268bea - version: 1 - response: - body: - tool_type: FUNCTION - id: 00183a3f-79ba-413d-9f3b-609864268bea - version: 1 - version_type: FIXED - version_description: >- - Fetches current weather and uses celsius, fahrenheit, or kelvin - based on location of user. 
- name: string - created_on: 1715277014228 - modified_on: 1715277602313 - fallback_content: Unable to fetch current weather. - description: This tool is for getting the current weather. - parameters: >- - { "type": "object", "properties": { "location": { "type": - "string", "description": "The city and state, e.g. San - Francisco, CA" }, "format": { "type": "string", "enum": - ["celsius", "fahrenheit", "kelvin"], "description": "The - temperature unit to use. Infer this from the users location." } - }, "required": ["location", "format"] } - delete-tool-version: - path: /v0/evi/tools/{id}/version/{version} - method: DELETE - docs: >- - Deletes a specified version of a **Tool**. - - - Refer to our [tool - use](/docs/speech-to-speech-evi/features/tool-use#function-calling) - guide for comprehensive instructions on defining and integrating tools - into EVI. - source: - openapi: evi-openapi.json - path-parameters: - id: - type: string - docs: Identifier for a Tool. Formatted as a UUID. - version: - type: integer - docs: >- - Version number for a Tool. - - - Tools, Configs, Custom Voices, and Prompts are versioned. This - versioning system supports iterative development, allowing you to - progressively refine tools and revert to previous versions if - needed. - - - Version numbers are integer values representing different iterations - of the Tool. Each update to the Tool increments its version number. - display-name: Delete tool version - errors: - - root.BadRequestError - examples: - - path-parameters: - id: 00183a3f-79ba-413d-9f3b-609864268bea - version: 1 - update-tool-description: - path: /v0/evi/tools/{id}/version/{version} - method: PATCH - docs: >- - Updates the description of a specified **Tool** version. - - - Refer to our [tool - use](/docs/speech-to-speech-evi/features/tool-use#function-calling) - guide for comprehensive instructions on defining and integrating tools - into EVI. - source: - openapi: evi-openapi.json - path-parameters: - id: - type: string - docs: Identifier for a Tool. Formatted as a UUID. - version: - type: integer - docs: >- - Version number for a Tool. - - - Tools, Configs, Custom Voices, and Prompts are versioned. This - versioning system supports iterative development, allowing you to - progressively refine tools and revert to previous versions if - needed. - - - Version numbers are integer values representing different iterations - of the Tool. Each update to the Tool increments its version number. - display-name: Update tool description - request: - name: PostedUserDefinedToolVersionDescription - body: - properties: - version_description: - type: optional - docs: An optional description of the Tool version. - content-type: application/json - response: - docs: Success - type: optional - status-code: 200 - errors: - - root.BadRequestError - examples: - - path-parameters: - id: 00183a3f-79ba-413d-9f3b-609864268bea - version: 1 - request: - version_description: >- - Fetches current temperature, precipitation, wind speed, AQI, and - other weather conditions. Uses Celsius, Fahrenheit, or kelvin - depending on user's region. - response: - body: - tool_type: FUNCTION - id: 00183a3f-79ba-413d-9f3b-609864268bea - version: 1 - version_type: FIXED - version_description: >- - Fetches current temperature, precipitation, wind speed, AQI, and - other weather conditions. Uses Celsius, Fahrenheit, or kelvin - depending on user's region. - name: string - created_on: 1715277014228 - modified_on: 1715277602313 - fallback_content: Unable to fetch current weather. 
- description: This tool is for getting the current weather. - parameters: >- - { "type": "object", "properties": { "location": { "type": - "string", "description": "The city and state, e.g. San - Francisco, CA" }, "format": { "type": "string", "enum": - ["celsius", "fahrenheit", "kelvin"], "description": "The - temperature unit to use. Infer this from the users location." } - }, "required": ["location", "format"] } - source: - openapi: evi-openapi.json diff --git a/.mock/definition/expression-measurement/__package__.yml b/.mock/definition/expression-measurement/__package__.yml deleted file mode 100644 index 0967ef42..00000000 --- a/.mock/definition/expression-measurement/__package__.yml +++ /dev/null @@ -1 +0,0 @@ -{} diff --git a/.mock/definition/expression-measurement/batch/__package__.yml b/.mock/definition/expression-measurement/batch/__package__.yml deleted file mode 100644 index 98834cc8..00000000 --- a/.mock/definition/expression-measurement/batch/__package__.yml +++ /dev/null @@ -1,1814 +0,0 @@ -service: - auth: false - base-path: '' - endpoints: - list-jobs: - path: /v0/batch/jobs - method: GET - docs: Sort and filter jobs. - source: - openapi: batch-openapi.json - display-name: List jobs - request: - name: BatchListJobsRequest - query-parameters: - limit: - type: optional - default: 50 - docs: The maximum number of jobs to include in the response. - status: - type: optional - allow-multiple: true - docs: >- - Include only jobs of this status in the response. There are four - possible statuses: - - - - `QUEUED`: The job has been received and is waiting to be - processed. - - - - `IN_PROGRESS`: The job is currently being processed. - - - - `COMPLETED`: The job has finished processing. - - - - `FAILED`: The job encountered an error and could not be - completed successfully. - when: - type: optional - docs: >- - Specify whether to include jobs created before or after a given - `timestamp_ms`. - timestamp_ms: - type: optional - default: 1704319392247 - docs: |- - Provide a timestamp in milliseconds to filter jobs. - - When combined with the `when` parameter, you can filter jobs before or after the given timestamp. Defaults to the current Unix timestamp if one is not provided. - sort_by: - type: optional - docs: >- - Specify which timestamp to sort the jobs by. - - - - `created`: Sort jobs by the time of creation, indicated by - `created_timestamp_ms`. - - - - `started`: Sort jobs by the time processing started, indicated - by `started_timestamp_ms`. - - - - `ended`: Sort jobs by the time processing ended, indicated by - `ended_timestamp_ms`. - direction: - type: optional - docs: >- - Specify the order in which to sort the jobs. Defaults to - descending order. - - - - `asc`: Sort in ascending order (chronological, with the oldest - records first). - - - - `desc`: Sort in descending order (reverse-chronological, with - the newest records first). 
- response: - docs: '' - type: list - status-code: 200 - examples: - - response: - body: - - job_id: job_id - request: - callback_url: null - files: - - filename: filename - md5sum: md5sum - content_type: content_type - models: - burst: {} - face: - descriptions: null - facs: null - fps_pred: 3 - identify_faces: false - min_face_size: 60 - prob_threshold: 0.99 - save_faces: false - facemesh: {} - language: - granularity: word - identify_speakers: false - sentiment: null - toxicity: null - ner: - identify_speakers: false - prosody: - granularity: utterance - identify_speakers: false - window: null - notify: true - text: [] - urls: - - https://hume-tutorials.s3.amazonaws.com/faces.zip - state: - created_timestamp_ms: 1712587158717 - ended_timestamp_ms: 1712587159274 - num_errors: 0 - num_predictions: 10 - started_timestamp_ms: 1712587158800 - status: COMPLETED - type: INFERENCE - start-inference-job: - path: /v0/batch/jobs - method: POST - docs: Start a new measurement inference job. - source: - openapi: batch-openapi.json - display-name: Start inference job - request: - body: InferenceBaseRequest - content-type: application/json - response: - docs: '' - type: JobId - status-code: 200 - property: job_id - examples: - - request: - urls: - - https://hume-tutorials.s3.amazonaws.com/faces.zip - notify: true - response: - body: - job_id: job_id - get-job-details: - path: /v0/batch/jobs/{id} - method: GET - docs: Get the request details and state of a given job. - source: - openapi: batch-openapi.json - path-parameters: - id: - type: string - docs: The unique identifier for the job. - display-name: Get job details - response: - docs: '' - type: UnionJob - status-code: 200 - examples: - - name: Inference - path-parameters: - id: job_id - response: - body: - type: INFERENCE - job_id: job_id - request: - callback_url: null - files: [] - models: - burst: {} - face: - descriptions: null - facs: null - fps_pred: 3 - identify_faces: false - min_face_size: 60 - prob_threshold: 0.99 - save_faces: false - facemesh: {} - language: - granularity: word - identify_speakers: false - sentiment: null - toxicity: null - ner: - identify_speakers: false - prosody: - granularity: utterance - identify_speakers: false - window: null - notify: true - text: [] - urls: - - https://hume-tutorials.s3.amazonaws.com/faces.zip - state: - created_timestamp_ms: 1712590457884 - ended_timestamp_ms: 1712590462252 - num_errors: 0 - num_predictions: 10 - started_timestamp_ms: 1712590457995 - status: COMPLETED - get-job-predictions: - path: /v0/batch/jobs/{id}/predictions - method: GET - docs: Get the JSON predictions of a completed inference job. - source: - openapi: batch-openapi.json - path-parameters: - id: - type: string - docs: The unique identifier for the job. 
- display-name: Get job predictions - response: - docs: '' - type: list - status-code: 200 - examples: - - path-parameters: - id: job_id - response: - body: - - source: - type: url - url: https://hume-tutorials.s3.amazonaws.com/faces.zip - results: - predictions: - - file: faces/100.jpg - models: - face: - metadata: null - grouped_predictions: - - id: unknown - predictions: - - frame: 0 - time: 0 - prob: 0.9994111061096191 - box: - x: 1187.885986328125 - 'y': 1397.697509765625 - w: 1401.668701171875 - h: 1961.424560546875 - emotions: - - name: Admiration - score: 0.10722749680280685 - - name: Adoration - score: 0.06395940482616425 - - name: Aesthetic Appreciation - score: 0.05811462551355362 - - name: Amusement - score: 0.14187128841876984 - - name: Anger - score: 0.02804684266448021 - - name: Anxiety - score: 0.2713485360145569 - - name: Awe - score: 0.33812594413757324 - - name: Awkwardness - score: 0.1745193600654602 - - name: Boredom - score: 0.23600080609321594 - - name: Calmness - score: 0.18988418579101562 - - name: Concentration - score: 0.44288986921310425 - - name: Confusion - score: 0.39346569776535034 - - name: Contemplation - score: 0.31002455949783325 - - name: Contempt - score: 0.048870109021663666 - - name: Contentment - score: 0.0579497292637825 - - name: Craving - score: 0.06544201076030731 - - name: Desire - score: 0.05526508390903473 - - name: Determination - score: 0.08590991795063019 - - name: Disappointment - score: 0.19508258998394012 - - name: Disgust - score: 0.031529419124126434 - - name: Distress - score: 0.23210826516151428 - - name: Doubt - score: 0.3284550905227661 - - name: Ecstasy - score: 0.040716782212257385 - - name: Embarrassment - score: 0.1467227339744568 - - name: Empathic Pain - score: 0.07633581757545471 - - name: Entrancement - score: 0.16245244443416595 - - name: Envy - score: 0.03267110139131546 - - name: Excitement - score: 0.10656816512346268 - - name: Fear - score: 0.3115977346897125 - - name: Guilt - score: 0.11615975946187973 - - name: Horror - score: 0.19795553386211395 - - name: Interest - score: 0.3136432468891144 - - name: Joy - score: 0.06285581737756729 - - name: Love - score: 0.06339752674102783 - - name: Nostalgia - score: 0.05866732448339462 - - name: Pain - score: 0.07684041559696198 - - name: Pride - score: 0.026822954416275024 - - name: Realization - score: 0.30000734329223633 - - name: Relief - score: 0.04414166510105133 - - name: Romance - score: 0.042728863656520844 - - name: Sadness - score: 0.14773206412792206 - - name: Satisfaction - score: 0.05902980640530586 - - name: Shame - score: 0.08103451132774353 - - name: Surprise (negative) - score: 0.25518184900283813 - - name: Surprise (positive) - score: 0.28845661878585815 - - name: Sympathy - score: 0.062488824129104614 - - name: Tiredness - score: 0.1559651643037796 - - name: Triumph - score: 0.01955239288508892 - facs: null - descriptions: null - errors: [] - get-job-artifacts: - path: /v0/batch/jobs/{id}/artifacts - method: GET - docs: Get the artifacts ZIP of a completed inference job. - source: - openapi: batch-openapi.json - path-parameters: - id: - type: string - docs: The unique identifier for the job. - display-name: Get job artifacts - response: - docs: '' - type: file - status-code: 200 - start-inference-job-from-local-file: - path: /v0/batch/jobs - method: POST - auth: - - BearerAuth: [] - docs: Start a new batch inference job. 
- source: - openapi: batch-files-openapi.yml - display-name: Start inference job from local file - request: - name: BatchStartInferenceJobFromLocalFileRequest - body: - properties: - json: - type: optional - docs: >- - Stringified JSON object containing the inference job - configuration. - file: - type: list - docs: >- - Local media files (see recommended input filetypes) to be - processed. - - - If you wish to supply more than 100 files, consider providing - them as an archive (`.zip`, `.tar.gz`, `.tar.bz2`, `.tar.xz`). - content-type: multipart/form-data - response: - docs: '' - type: JobId - status-code: 200 - property: job_id - examples: - - request: {} - response: - body: - job_id: job_id - source: - openapi: batch-files-openapi.yml -types: - Alternative: literal<"language_only"> - Bcp47Tag: - enum: - - zh - - da - - nl - - en - - value: en-AU - name: EnAu - - value: en-IN - name: EnIn - - value: en-NZ - name: EnNz - - value: en-GB - name: EnGb - - fr - - value: fr-CA - name: FrCa - - de - - hi - - value: hi-Latn - name: HiLatn - - id - - it - - ja - - ko - - 'no' - - pl - - pt - - value: pt-BR - name: PtBr - - value: pt-PT - name: PtPt - - ru - - es - - value: es-419 - name: Es419 - - sv - - ta - - tr - - uk - source: - openapi: batch-files-openapi.yml - BoundingBox: - docs: A bounding box around a face. - properties: - x: - type: double - docs: x-coordinate of bounding box top left corner. - 'y': - type: double - docs: y-coordinate of bounding box top left corner. - w: - type: double - docs: Bounding box width. - h: - type: double - docs: Bounding box height. - source: - openapi: batch-openapi.json - BurstPrediction: - properties: - time: TimeInterval - emotions: - docs: A high-dimensional embedding in emotion space. - type: list - descriptions: - docs: Modality-specific descriptive features and their scores. - type: list - source: - openapi: batch-openapi.json - Classification: map - CompletedEmbeddingGeneration: - properties: - created_timestamp_ms: - type: long - docs: When this job was created (Unix timestamp in milliseconds). - started_timestamp_ms: - type: long - docs: When this job started (Unix timestamp in milliseconds). - ended_timestamp_ms: - type: long - docs: When this job ended (Unix timestamp in milliseconds). - source: - openapi: batch-openapi.json - CompletedInference: - properties: - created_timestamp_ms: - type: long - docs: When this job was created (Unix timestamp in milliseconds). - started_timestamp_ms: - type: long - docs: When this job started (Unix timestamp in milliseconds). - ended_timestamp_ms: - type: long - docs: When this job ended (Unix timestamp in milliseconds). - num_predictions: - type: uint64 - docs: The number of predictions that were generated by this job. - num_errors: - type: uint64 - docs: The number of errors that occurred while running this job. - source: - openapi: batch-openapi.json - CompletedTlInference: - properties: - created_timestamp_ms: - type: long - docs: When this job was created (Unix timestamp in milliseconds). - started_timestamp_ms: - type: long - docs: When this job started (Unix timestamp in milliseconds). - ended_timestamp_ms: - type: long - docs: When this job ended (Unix timestamp in milliseconds). - num_predictions: - type: uint64 - docs: The number of predictions that were generated by this job. - num_errors: - type: uint64 - docs: The number of errors that occurred while running this job. 
- source: - openapi: batch-openapi.json - CompletedTraining: - properties: - created_timestamp_ms: - type: long - docs: When this job was created (Unix timestamp in milliseconds). - started_timestamp_ms: - type: long - docs: When this job started (Unix timestamp in milliseconds). - ended_timestamp_ms: - type: long - docs: When this job ended (Unix timestamp in milliseconds). - custom_model: TrainingCustomModel - alternatives: optional> - source: - openapi: batch-openapi.json - CustomModelPrediction: - properties: - output: map - error: string - task_type: string - source: - openapi: batch-openapi.json - CustomModelRequest: - properties: - name: string - description: optional - tags: optional> - source: - openapi: batch-openapi.json - Dataset: - discriminated: false - union: - - DatasetId - - DatasetVersionId - source: - openapi: batch-openapi.json - DatasetId: - properties: - id: - type: string - validation: - format: uuid - source: - openapi: batch-openapi.json - DatasetVersionId: - properties: - version_id: - type: string - validation: - format: uuid - source: - openapi: batch-openapi.json - DescriptionsScore: - properties: - name: - type: string - docs: Name of the descriptive feature being expressed. - score: - type: float - docs: Embedding value for the descriptive feature being expressed. - source: - openapi: batch-openapi.json - Direction: - enum: - - asc - - desc - source: - openapi: batch-openapi.json - EmbeddingGenerationBaseRequest: - properties: - registry_file_details: - type: optional> - docs: File ID and File URL pairs for an asset registry file - source: - openapi: batch-openapi.json - EmotionScore: - properties: - name: - type: string - docs: Name of the emotion being expressed. - score: - type: float - docs: Embedding value for the emotion being expressed. - source: - openapi: batch-openapi.json - Error: - properties: - message: - type: string - docs: An error message. - file: - type: string - docs: A file path relative to the top level source URL or file. - source: - openapi: batch-openapi.json - EvaluationArgs: - properties: - validation: optional - source: - openapi: batch-openapi.json - Face: - docs: >- - The Facial Emotional Expression model analyzes human facial expressions in - images and videos. Results will be provided per frame for video files. - - - Recommended input file types: `.png`, `.jpeg`, `.mp4` - properties: - fps_pred: - type: optional - docs: >- - Number of frames per second to process. Other frames will be omitted - from the response. Set to `0` to process every frame. - default: 3 - prob_threshold: - type: optional - docs: >- - Face detection probability threshold. Faces detected with a - probability less than this threshold will be omitted from the - response. - default: 0.99 - validation: - min: 0 - max: 1 - identify_faces: - type: optional - docs: >- - Whether to return identifiers for faces across frames. If `true`, - unique identifiers will be assigned to face bounding boxes to - differentiate different faces. If `false`, all faces will be tagged - with an `unknown` ID. - default: false - min_face_size: - type: optional - docs: >- - Minimum bounding box side length in pixels to treat as a face. Faces - detected with a bounding box side length in pixels less than this - threshold will be omitted from the response. - facs: optional - descriptions: optional - save_faces: - type: optional - docs: >- - Whether to extract and save the detected faces in the artifacts zip - created by each job. 
- default: false - source: - openapi: batch-files-openapi.yml - FacePrediction: - properties: - frame: - type: uint64 - docs: Frame number - time: - type: double - docs: Time in seconds when face detection occurred. - prob: - type: double - docs: The predicted probability that a detected face was actually a face. - box: BoundingBox - emotions: - docs: A high-dimensional embedding in emotion space. - type: list - facs: - type: optional> - docs: FACS 2.0 features and their scores. - descriptions: - type: optional> - docs: Modality-specific descriptive features and their scores. - source: - openapi: batch-openapi.json - FacemeshPrediction: - properties: - emotions: - docs: A high-dimensional embedding in emotion space. - type: list - source: - openapi: batch-openapi.json - FacsScore: - properties: - name: - type: string - docs: Name of the FACS 2.0 feature being expressed. - score: - type: float - docs: Embedding value for the FACS 2.0 feature being expressed. - source: - openapi: batch-openapi.json - Failed: - properties: - created_timestamp_ms: - type: long - docs: When this job was created (Unix timestamp in milliseconds). - started_timestamp_ms: - type: long - docs: When this job started (Unix timestamp in milliseconds). - ended_timestamp_ms: - type: long - docs: When this job ended (Unix timestamp in milliseconds). - message: - type: string - docs: An error message. - source: - openapi: batch-openapi.json - File: - docs: The list of files submitted for analysis. - properties: - filename: - type: optional - docs: The name of the file. - content_type: - type: optional - docs: The content type of the file. - md5sum: - type: string - docs: The MD5 checksum of the file. - source: - openapi: batch-openapi.json - Granularity: - enum: - - word - - sentence - - utterance - - conversational_turn - docs: >- - The granularity at which to generate predictions. The `granularity` field - is ignored if transcription is not enabled or if the `window` field has - been set. - - - - `word`: At the word level, our model provides a separate output for each - word, offering the most granular insight into emotional expression during - speech. - - - - `sentence`: At the sentence level of granularity, we annotate the - emotional tone of each spoken sentence with our Prosody and Emotional - Language models. - - - - `utterance`: Utterance-level granularity is between word- and - sentence-level. It takes into account natural pauses or breaks in speech, - providing more rapidly updated measures of emotional expression within a - flowing conversation. For text inputs, utterance-level granularity will - produce results identical to sentence-level granularity. - - - - `conversational_turn`: Conversational turn-level granularity provides a - distinct output for each change in speaker. It captures the full sequence - of words and sentences spoken uninterrupted by each person. This approach - provides a higher-level view of the emotional dynamics in a - multi-participant dialogue. For text inputs, specifying conversational - turn-level granularity for our Emotional Language model will produce - results for the entire passage. - source: - openapi: batch-files-openapi.yml - GroupedPredictionsBurstPrediction: - properties: - id: - type: string - docs: >- - An automatically generated label to identify individuals in your media - file. Will be `unknown` if you have chosen to disable identification, - or if the model is unable to distinguish between individuals. 
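The `Face` model options and the `Granularity` values documented above translate directly into the `models` object of a job request. A hedged sketch of such a configuration; the field names come from the schema above, the values are only illustrative.

```python
# Illustrative model configuration mirroring the Face and Granularity fields above.
models_config = {
    "face": {
        "fps_pred": 3,             # process 3 frames per second (0 = every frame)
        "prob_threshold": 0.99,    # drop detections below this probability
        "identify_faces": True,    # assign stable IDs to faces across frames
        "min_face_size": 60,       # ignore bounding boxes with sides under 60 px
        "facs": {},                # include FACS 2.0 outputs
        "descriptions": {},        # include descriptive features
        "save_faces": False,       # do not save face crops in the artifacts zip
    },
    "language": {
        "granularity": "sentence",  # word | sentence | utterance | conversational_turn
    },
}
```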
- predictions: list - source: - openapi: batch-openapi.json - GroupedPredictionsFacePrediction: - properties: - id: - type: string - docs: >- - An automatically generated label to identify individuals in your media - file. Will be `unknown` if you have chosen to disable identification, - or if the model is unable to distinguish between individuals. - predictions: list - source: - openapi: batch-openapi.json - GroupedPredictionsFacemeshPrediction: - properties: - id: - type: string - docs: >- - An automatically generated label to identify individuals in your media - file. Will be `unknown` if you have chosen to disable identification, - or if the model is unable to distinguish between individuals. - predictions: list - source: - openapi: batch-openapi.json - GroupedPredictionsLanguagePrediction: - properties: - id: - type: string - docs: >- - An automatically generated label to identify individuals in your media - file. Will be `unknown` if you have chosen to disable identification, - or if the model is unable to distinguish between individuals. - predictions: list - source: - openapi: batch-openapi.json - GroupedPredictionsNerPrediction: - properties: - id: - type: string - docs: >- - An automatically generated label to identify individuals in your media - file. Will be `unknown` if you have chosen to disable identification, - or if the model is unable to distinguish between individuals. - predictions: list - source: - openapi: batch-openapi.json - GroupedPredictionsProsodyPrediction: - properties: - id: - type: string - docs: >- - An automatically generated label to identify individuals in your media - file. Will be `unknown` if you have chosen to disable identification, - or if the model is unable to distinguish between individuals. - predictions: list - source: - openapi: batch-openapi.json - InProgress: - properties: - created_timestamp_ms: - type: long - docs: When this job was created (Unix timestamp in milliseconds). - started_timestamp_ms: - type: long - docs: When this job started (Unix timestamp in milliseconds). - source: - openapi: batch-openapi.json - InferenceBaseRequest: - properties: - models: - type: optional - docs: >- - Specify the models to use for inference. - - - If this field is not explicitly set, then all models will run by - default. - transcription: optional - urls: - type: optional> - docs: >- - URLs to the media files to be processed. Each must be a valid public - URL to a media file (see recommended input filetypes) or an archive - (`.zip`, `.tar.gz`, `.tar.bz2`, `.tar.xz`) of media files. - - - If you wish to supply more than 100 URLs, consider providing them as - an archive (`.zip`, `.tar.gz`, `.tar.bz2`, `.tar.xz`). - text: - type: optional> - docs: >- - Text supplied directly to our Emotional Language and NER models for - analysis. - callback_url: - type: optional - docs: >- - If provided, a `POST` request will be made to the URL with the - generated predictions on completion or the error message on failure. - notify: - type: optional - docs: >- - Whether to send an email notification to the user upon job - completion/failure. - default: false - source: - openapi: batch-files-openapi.yml - InferencePrediction: - properties: - file: - type: string - docs: A file path relative to the top level source URL or file. - models: ModelsPredictions - source: - openapi: batch-openapi.json - InferenceRequest: - properties: - models: optional - transcription: optional - urls: - type: optional> - docs: >- - URLs to the media files to be processed. 
Each must be a valid public - URL to a media file (see recommended input filetypes) or an archive - (`.zip`, `.tar.gz`, `.tar.bz2`, `.tar.xz`) of media files. - - - If you wish to supply more than 100 URLs, consider providing them as - an archive (`.zip`, `.tar.gz`, `.tar.bz2`, `.tar.xz`). - text: - type: optional> - docs: Text to supply directly to our language and NER models. - callback_url: - type: optional - docs: >- - If provided, a `POST` request will be made to the URL with the - generated predictions on completion or the error message on failure. - notify: - type: optional - docs: >- - Whether to send an email notification to the user upon job - completion/failure. - default: false - files: list - source: - openapi: batch-openapi.json - InferenceResults: - properties: - predictions: list - errors: list - source: - openapi: batch-openapi.json - InferenceSourcePredictResult: - properties: - source: Source - results: optional - error: - type: optional - docs: An error message. - source: - openapi: batch-openapi.json - JobEmbeddingGeneration: - properties: - job_id: - type: string - docs: The ID associated with this job. - validation: - format: uuid - user_id: - type: string - validation: - format: uuid - request: EmbeddingGenerationBaseRequest - state: StateEmbeddingGeneration - source: - openapi: batch-openapi.json - JobInference: - properties: - job_id: - type: string - docs: The ID associated with this job. - validation: - format: uuid - request: - type: InferenceRequest - docs: The request that initiated the job. - state: - type: StateInference - docs: The current state of the job. - source: - openapi: batch-openapi.json - JobTlInference: - properties: - job_id: - type: string - docs: The ID associated with this job. - validation: - format: uuid - user_id: - type: string - validation: - format: uuid - request: TlInferenceBaseRequest - state: StateTlInference - source: - openapi: batch-openapi.json - JobTraining: - properties: - job_id: - type: string - docs: The ID associated with this job. - validation: - format: uuid - user_id: - type: string - validation: - format: uuid - request: TrainingBaseRequest - state: StateTraining - source: - openapi: batch-openapi.json - JobId: - properties: - job_id: - type: string - docs: The ID of the started job. - validation: - format: uuid - source: - openapi: batch-files-openapi.yml - Language: - docs: >- - The Emotional Language model analyzes passages of text. This also supports - audio and video files by transcribing and then directly analyzing the - transcribed text. - - - Recommended input filetypes: `.txt`, `.mp3`, `.wav`, `.mp4` - properties: - granularity: optional - sentiment: optional - toxicity: optional - identify_speakers: - type: optional - docs: >- - Whether to return identifiers for speakers over time. If `true`, - unique identifiers will be assigned to spoken words to differentiate - different speakers. If `false`, all speakers will be tagged with an - `unknown` ID. - default: false - source: - openapi: batch-files-openapi.yml - LanguagePrediction: - properties: - text: - type: string - docs: A segment of text (like a word or a sentence). - position: PositionInterval - time: optional - confidence: - type: optional - docs: >- - Value between `0.0` and `1.0` that indicates our transcription model's - relative confidence in this text. - speaker_confidence: - type: optional - docs: >- - Value between `0.0` and `1.0` that indicates our transcription model's - relative confidence that this text was spoken by this speaker. 
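The inference request shapes above accept `models`, `transcription`, `urls`, `text`, `callback_url`, and `notify`. A hedged sketch of a URL-based job submission; the `/v0/batch/jobs` path and the API-key header are assumptions, everything else mirrors the documented fields.

```python
import requests

body = {
    "models": {"prosody": {}, "language": {}},      # omit to run all models
    "urls": ["https://example.com/recording.mp3"],
    "text": ["I am so excited for the weekend!"],   # analyzed by the language/NER models
    "callback_url": "https://example.com/hume-webhook",  # POSTed results on completion or failure
    "notify": True,                                 # email notification when the job finishes
}

resp = requests.post(
    "https://api.hume.ai/v0/batch/jobs",            # assumed path, not shown above
    headers={"X-Hume-Api-Key": "<your-api-key>"},
    json=body,
)
job_id = resp.json()["job_id"]
```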
- emotions: - docs: A high-dimensional embedding in emotion space. - type: list - sentiment: - type: optional> - docs: >- - Sentiment predictions returned as a distribution. This model predicts - the probability that a given text could be interpreted as having each - sentiment level from `1` (negative) to `9` (positive). - - - Compared to returning one estimate of sentiment, this enables a more - nuanced analysis of a text's meaning. For example, a text with very - neutral sentiment would have an average rating of `5`. But also a text - that could be interpreted as having very positive sentiment or very - negative sentiment would also have an average rating of `5`. The - average sentiment is less informative than the distribution over - sentiment, so this API returns a value for each sentiment level. - toxicity: - type: optional> - docs: >- - Toxicity predictions returned as probabilities that the text can be - classified into the following categories: `toxic`, `severe_toxic`, - `obscene`, `threat`, `insult`, and `identity_hate`. - source: - openapi: batch-openapi.json - Models: - docs: The models used for inference. - properties: - face: optional - burst: optional - prosody: optional - language: optional - ner: optional - facemesh: optional - source: - openapi: batch-files-openapi.yml - ModelsPredictions: - properties: - face: optional - burst: optional - prosody: optional - language: optional - ner: optional - facemesh: optional - source: - openapi: batch-openapi.json - Ner: - docs: >- - The NER (Named-entity Recognition) model identifies real-world objects and - concepts in passages of text. This also supports audio and video files by - transcribing and then directly analyzing the transcribed text. - - - Recommended input filetypes: `.txt`, `.mp3`, `.wav`, `.mp4` - properties: - identify_speakers: - type: optional - docs: >- - Whether to return identifiers for speakers over time. If `true`, - unique identifiers will be assigned to spoken words to differentiate - different speakers. If `false`, all speakers will be tagged with an - `unknown` ID. - default: false - source: - openapi: batch-files-openapi.yml - NerPrediction: - properties: - entity: - type: string - docs: The recognized topic or entity. - position: PositionInterval - entity_confidence: - type: double - docs: Our NER model's relative confidence in the recognized topic or entity. - support: - type: double - docs: A measure of how often the entity is linked to by other entities. - uri: - type: string - docs: >- - A URL which provides more information about the recognized topic or - entity. - link_word: - type: string - docs: The specific word to which the emotion predictions are linked. - time: optional - confidence: - type: optional - docs: >- - Value between `0.0` and `1.0` that indicates our transcription model's - relative confidence in this text. - speaker_confidence: - type: optional - docs: >- - Value between `0.0` and `1.0` that indicates our transcription model's - relative confidence that this text was spoken by this speaker. - emotions: - docs: A high-dimensional embedding in emotion space. - type: list - source: - openapi: batch-openapi.json - 'Null': - type: map - docs: No associated metadata for this model. Value will be `null`. - PositionInterval: - docs: >- - Position of a segment of text within a larger document, measured in - characters. Uses zero-based indexing. The beginning index is inclusive and - the end index is exclusive. 
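The sentiment documentation above returns a distribution over levels `1`-`9` rather than a single score. A small worked sketch of collapsing that distribution into an expected value, which illustrates why the full distribution is more informative than its mean.

```python
# Each SentimentScore pairs a level name ("1".."9") with a probability.
sentiment = [{"name": str(level), "score": s} for level, s in
             zip(range(1, 10), [0.30, 0.05, 0.02, 0.02, 0.22, 0.02, 0.02, 0.05, 0.30])]

expected = sum(int(item["name"]) * item["score"] for item in sentiment)
print(round(expected, 2))  # 5.0: a strongly bimodal text averages out to "neutral",
                           # which is exactly the ambiguity the distribution preserves
```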
- properties: - begin: - type: uint64 - docs: The index of the first character in the text segment, inclusive. - end: - type: uint64 - docs: The index of the last character in the text segment, exclusive. - source: - openapi: batch-openapi.json - PredictionsOptionalNullBurstPrediction: - properties: - metadata: optional - grouped_predictions: list - source: - openapi: batch-openapi.json - PredictionsOptionalNullFacePrediction: - properties: - metadata: optional - grouped_predictions: list - source: - openapi: batch-openapi.json - PredictionsOptionalNullFacemeshPrediction: - properties: - metadata: optional - grouped_predictions: list - source: - openapi: batch-openapi.json - PredictionsOptionalTranscriptionMetadataLanguagePrediction: - properties: - metadata: optional - grouped_predictions: list - source: - openapi: batch-openapi.json - PredictionsOptionalTranscriptionMetadataNerPrediction: - properties: - metadata: optional - grouped_predictions: list - source: - openapi: batch-openapi.json - PredictionsOptionalTranscriptionMetadataProsodyPrediction: - properties: - metadata: optional - grouped_predictions: list - source: - openapi: batch-openapi.json - Prosody: - docs: >- - The Speech Prosody model analyzes the intonation, stress, and rhythm of - spoken word. - - - Recommended input file types: `.wav`, `.mp3`, `.mp4` - properties: - granularity: optional - window: optional - identify_speakers: - type: optional - docs: >- - Whether to return identifiers for speakers over time. If `true`, - unique identifiers will be assigned to spoken words to differentiate - different speakers. If `false`, all speakers will be tagged with an - `unknown` ID. - default: false - source: - openapi: batch-files-openapi.yml - ProsodyPrediction: - properties: - text: - type: optional - docs: A segment of text (like a word or a sentence). - time: TimeInterval - confidence: - type: optional - docs: >- - Value between `0.0` and `1.0` that indicates our transcription model's - relative confidence in this text. - speaker_confidence: - type: optional - docs: >- - Value between `0.0` and `1.0` that indicates our transcription model's - relative confidence that this text was spoken by this speaker. - emotions: - docs: A high-dimensional embedding in emotion space. - type: list - source: - openapi: batch-openapi.json - Queued: - properties: - created_timestamp_ms: - type: long - docs: When this job was created (Unix timestamp in milliseconds). 
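The `Prosody` model above can be driven either by `granularity` or by a sliding `window` (the `Window` type defined later in this file, with `length` and `step` in seconds). A hedged configuration sketch; the values are illustrative.

```python
# Prosody configured with a sliding window instead of a granularity setting;
# "length" and "step" come from the Window type defined later in this file.
prosody_config = {
    "window": {
        "length": 4.0,   # seconds of audio analyzed per window
        "step": 1.0,     # slide the window forward one second at a time
    },
    "identify_speakers": True,  # tag predictions with per-speaker IDs
}
# Note: per the Granularity docs above, "granularity" is ignored once "window" is set.
```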
- source: - openapi: batch-openapi.json - RegistryFileDetail: - properties: - file_id: - type: string - docs: File ID in the Asset Registry - file_url: - type: string - docs: URL to the file in the Asset Registry - source: - openapi: batch-openapi.json - Regression: map - SentimentScore: - properties: - name: - type: string - docs: Level of sentiment, ranging from `1` (negative) to `9` (positive) - score: - type: float - docs: Prediction for this level of sentiment - source: - openapi: batch-openapi.json - SortBy: - enum: - - created - - started - - ended - source: - openapi: batch-openapi.json - Source: - discriminated: false - union: - - SourceUrl - - SourceFile - - SourceTextSource - source: - openapi: batch-openapi.json - SourceFile: - properties: - type: literal<"file"> - extends: - - File - source: - openapi: batch-openapi.json - SourceTextSource: - properties: - type: literal<"text"> - source: - openapi: batch-openapi.json - SourceUrl: - properties: - type: literal<"url"> - extends: - - Url - source: - openapi: batch-openapi.json - StateEmbeddingGeneration: - discriminated: false - union: - - StateEmbeddingGenerationQueued - - StateEmbeddingGenerationInProgress - - StateEmbeddingGenerationCompletedEmbeddingGeneration - - StateEmbeddingGenerationFailed - source: - openapi: batch-openapi.json - StateEmbeddingGenerationCompletedEmbeddingGeneration: - properties: - status: literal<"COMPLETED"> - extends: - - CompletedEmbeddingGeneration - source: - openapi: batch-openapi.json - StateEmbeddingGenerationFailed: - properties: - status: literal<"FAILED"> - extends: - - Failed - source: - openapi: batch-openapi.json - StateEmbeddingGenerationInProgress: - properties: - status: literal<"IN_PROGRESS"> - extends: - - InProgress - source: - openapi: batch-openapi.json - StateEmbeddingGenerationQueued: - properties: - status: literal<"QUEUED"> - extends: - - Queued - source: - openapi: batch-openapi.json - StateInference: - discriminated: false - union: - - QueuedState - - InProgressState - - CompletedState - - FailedState - source: - openapi: batch-openapi.json - CompletedState: - properties: - status: literal<"COMPLETED"> - extends: - - CompletedInference - source: - openapi: batch-openapi.json - FailedState: - properties: - status: literal<"FAILED"> - extends: - - Failed - source: - openapi: batch-openapi.json - InProgressState: - properties: - status: literal<"IN_PROGRESS"> - extends: - - InProgress - source: - openapi: batch-openapi.json - QueuedState: - properties: - status: literal<"QUEUED"> - extends: - - Queued - source: - openapi: batch-openapi.json - StateTlInference: - discriminated: false - union: - - StateTlInferenceQueued - - StateTlInferenceInProgress - - StateTlInferenceCompletedTlInference - - StateTlInferenceFailed - source: - openapi: batch-openapi.json - StateTlInferenceCompletedTlInference: - properties: - status: literal<"COMPLETED"> - extends: - - CompletedTlInference - source: - openapi: batch-openapi.json - StateTlInferenceFailed: - properties: - status: literal<"FAILED"> - extends: - - Failed - source: - openapi: batch-openapi.json - StateTlInferenceInProgress: - properties: - status: literal<"IN_PROGRESS"> - extends: - - InProgress - source: - openapi: batch-openapi.json - StateTlInferenceQueued: - properties: - status: literal<"QUEUED"> - extends: - - Queued - source: - openapi: batch-openapi.json - StateTraining: - discriminated: false - union: - - StateTrainingQueued - - StateTrainingInProgress - - StateTrainingCompletedTraining - - StateTrainingFailed - source: - 
openapi: batch-openapi.json - StateTrainingCompletedTraining: - properties: - status: literal<"COMPLETED"> - extends: - - CompletedTraining - source: - openapi: batch-openapi.json - StateTrainingFailed: - properties: - status: literal<"FAILED"> - extends: - - Failed - source: - openapi: batch-openapi.json - StateTrainingInProgress: - properties: - status: literal<"IN_PROGRESS"> - extends: - - InProgress - source: - openapi: batch-openapi.json - StateTrainingQueued: - properties: - status: literal<"QUEUED"> - extends: - - Queued - source: - openapi: batch-openapi.json - Status: - enum: - - QUEUED - - IN_PROGRESS - - COMPLETED - - FAILED - source: - openapi: batch-openapi.json - TlInferencePrediction: - properties: - file: - type: string - docs: A file path relative to the top level source URL or file. - file_type: string - custom_models: map - source: - openapi: batch-openapi.json - TlInferenceResults: - properties: - predictions: list - errors: list - source: - openapi: batch-openapi.json - TlInferenceSourcePredictResult: - properties: - source: Source - results: optional - error: - type: optional - docs: An error message. - source: - openapi: batch-openapi.json - Tag: - properties: - key: string - value: string - source: - openapi: batch-openapi.json - Target: - discriminated: false - union: - - long - - double - - string - source: - openapi: batch-openapi.json - Task: - discriminated: false - union: - - TaskClassification - - TaskRegression - source: - openapi: batch-openapi.json - TaskClassification: - properties: - type: literal<"classification"> - source: - openapi: batch-openapi.json - TaskRegression: - properties: - type: literal<"regression"> - source: - openapi: batch-openapi.json - TextSource: map - TimeInterval: - docs: A time range with a beginning and end, measured in seconds. - properties: - begin: - type: double - docs: Beginning of time range in seconds. - end: - type: double - docs: End of time range in seconds. - source: - openapi: batch-openapi.json - TlInferenceBaseRequest: - properties: - custom_model: CustomModel - urls: - type: optional> - docs: >- - URLs to the media files to be processed. Each must be a valid public - URL to a media file (see recommended input filetypes) or an archive - (`.zip`, `.tar.gz`, `.tar.bz2`, `.tar.xz`) of media files. - - - If you wish to supply more than 100 URLs, consider providing them as - an archive (`.zip`, `.tar.gz`, `.tar.bz2`, `.tar.xz`). - callback_url: - type: optional - docs: >- - If provided, a `POST` request will be made to the URL with the - generated predictions on completion or the error message on failure. - notify: - type: optional - docs: >- - Whether to send an email notification to the user upon job - completion/failure. - default: false - source: - openapi: batch-openapi.json - CustomModel: - discriminated: false - union: - - CustomModelId - - CustomModelVersionId - source: - openapi: batch-openapi.json - CustomModelId: - properties: - id: string - source: - openapi: batch-openapi.json - CustomModelVersionId: - properties: - version_id: string - source: - openapi: batch-openapi.json - ToxicityScore: - properties: - name: - type: string - docs: Category of toxicity. 
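The job state unions above are discriminated by a `status` of `QUEUED`, `IN_PROGRESS`, `COMPLETED`, or `FAILED`, each variant carrying the timestamps and counters documented earlier in this file. A small sketch of consuming that shape; the helper name is hypothetical.

```python
def summarize_state(state: dict) -> str:
    """Summarize a StateInference-style object, per the status variants above."""
    status = state["status"]
    if status == "COMPLETED":
        elapsed_s = (state["ended_timestamp_ms"] - state["started_timestamp_ms"]) / 1000
        return (f"done: {state['num_predictions']} predictions, "
                f"{state['num_errors']} errors, ran {elapsed_s:.1f}s")
    if status == "FAILED":
        return f"failed: {state['message']}"
    if status == "IN_PROGRESS":
        return "still running"
    return "queued"
```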
- score: - type: float - docs: Prediction for this category of toxicity - source: - openapi: batch-openapi.json - TrainingBaseRequest: - properties: - custom_model: CustomModelRequest - dataset: Dataset - target_feature: - type: optional - default: label - task: optional - evaluation: optional - alternatives: optional> - callback_url: optional - notify: - type: optional - default: false - source: - openapi: batch-openapi.json - TrainingCustomModel: - properties: - id: string - version_id: optional - source: - openapi: batch-openapi.json - Transcription: - docs: |- - Transcription-related configuration options. - - To disable transcription, explicitly set this field to `null`. - properties: - language: - type: optional - docs: >- - By default, we use an automated language detection method for our - Speech Prosody, Language, and NER models. However, if you know what - language is being spoken in your media samples, you can specify it via - its BCP-47 tag and potentially obtain more accurate results. - - - You can specify any of the following languages: - - - Chinese: `zh` - - - Danish: `da` - - - Dutch: `nl` - - - English: `en` - - - English (Australia): `en-AU` - - - English (India): `en-IN` - - - English (New Zealand): `en-NZ` - - - English (United Kingdom): `en-GB` - - - French: `fr` - - - French (Canada): `fr-CA` - - - German: `de` - - - Hindi: `hi` - - - Hindi (Roman Script): `hi-Latn` - - - Indonesian: `id` - - - Italian: `it` - - - Japanese: `ja` - - - Korean: `ko` - - - Norwegian: `no` - - - Polish: `pl` - - - Portuguese: `pt` - - - Portuguese (Brazil): `pt-BR` - - - Portuguese (Portugal): `pt-PT` - - - Russian: `ru` - - - Spanish: `es` - - - Spanish (Latin America): `es-419` - - - Swedish: `sv` - - - Tamil: `ta` - - - Turkish: `tr` - - - Ukrainian: `uk` - identify_speakers: - type: optional - docs: >- - Whether to return identifiers for speakers over time. If `true`, - unique identifiers will be assigned to spoken words to differentiate - different speakers. If `false`, all speakers will be tagged with an - `unknown` ID. - default: false - confidence_threshold: - type: optional - docs: >- - Transcript confidence threshold. Transcripts generated with a - confidence less than this threshold will be considered invalid and not - used as an input for model inference. - default: 0.5 - validation: - min: 0 - max: 1 - source: - openapi: batch-files-openapi.yml - TranscriptionMetadata: - docs: Transcription metadata for your media file. - properties: - confidence: - type: double - docs: >- - Value between `0.0` and `1.0` indicating our transcription model's - relative confidence in the transcription of your media file. - detected_language: optional - source: - openapi: batch-openapi.json - Type: - enum: - - EMBEDDING_GENERATION - - INFERENCE - - TL_INFERENCE - - TRAINING - source: - openapi: batch-openapi.json - Unconfigurable: - type: map - docs: >- - To include predictions for this model type, set this field to `{}`. It is - currently not configurable further. - UnionJob: InferenceJob - EmbeddingGenerationJob: - properties: - type: string - extends: - - JobEmbeddingGeneration - source: - openapi: batch-openapi.json - InferenceJob: - properties: - type: - type: string - docs: >- - Denotes the job type. - - - Jobs created with the Expression Measurement API will have this field - set to `INFERENCE`. 
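The `Transcription` object above controls language hinting (BCP-47 tags), speaker identification, and the confidence threshold below which transcripts are discarded. A hedged sketch of a request body that sets it alongside a model configuration; values are illustrative.

```python
# Transcription settings mirroring the fields documented above.
transcription = {
    "language": "en-GB",          # BCP-47 hint; omit to use automatic detection
    "identify_speakers": True,    # assign speaker IDs instead of "unknown"
    "confidence_threshold": 0.7,  # discard transcripts below this confidence (default 0.5)
}

request_body = {
    "models": {"prosody": {}, "language": {"granularity": "utterance"}},
    "transcription": transcription,
    "urls": ["https://example.com/panel-discussion.wav"],
}
```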
- extends: - - JobInference - source: - openapi: batch-openapi.json - CustomModelsInferenceJob: - properties: - type: string - extends: - - JobTlInference - source: - openapi: batch-openapi.json - CustomModelsTrainingJob: - properties: - type: string - extends: - - JobTraining - source: - openapi: batch-openapi.json - UnionPredictResult: InferenceSourcePredictResult - Url: - properties: - url: - type: string - docs: The URL of the source media file. - source: - openapi: batch-openapi.json - ValidationArgs: - properties: - positive_label: optional - source: - openapi: batch-openapi.json - When: - enum: - - created_before - - created_after - source: - openapi: batch-openapi.json - Window: - docs: >- - Generate predictions based on time. - - - Setting the `window` field allows for a 'sliding window' approach, where a - fixed-size window moves across the audio or video file in defined steps. - This enables continuous analysis of prosody within subsets of the file, - providing dynamic and localized insights into emotional expression. - properties: - length: - type: optional - docs: The length of the sliding window. - default: 4 - validation: - min: 0.5 - step: - type: optional - docs: The step size of the sliding window. - default: 1 - validation: - min: 0.5 - source: - openapi: batch-files-openapi.yml diff --git a/.mock/definition/expression-measurement/stream/__package__.yml b/.mock/definition/expression-measurement/stream/__package__.yml deleted file mode 100644 index 94df9784..00000000 --- a/.mock/definition/expression-measurement/stream/__package__.yml +++ /dev/null @@ -1,113 +0,0 @@ -types: - EmotionEmbeddingItem: - properties: - name: - type: optional - docs: Name of the emotion being expressed. - score: - type: optional - docs: Embedding value for the emotion being expressed. - source: - openapi: streaming-asyncapi.yml - EmotionEmbedding: - docs: A high-dimensional embedding in emotion space. - type: list - StreamBoundingBox: - docs: A bounding box around a face. - properties: - x: - type: optional - docs: x-coordinate of bounding box top left corner. - validation: - min: 0 - 'y': - type: optional - docs: y-coordinate of bounding box top left corner. - validation: - min: 0 - w: - type: optional - docs: Bounding box width. - validation: - min: 0 - h: - type: optional - docs: Bounding box height. - validation: - min: 0 - source: - openapi: streaming-asyncapi.yml - TimeRange: - docs: A time range with a beginning and end, measured in seconds. - properties: - begin: - type: optional - docs: Beginning of time range in seconds. - validation: - min: 0 - end: - type: optional - docs: End of time range in seconds. - validation: - min: 0 - source: - openapi: streaming-asyncapi.yml - TextPosition: - docs: > - Position of a segment of text within a larger document, measured in - characters. Uses zero-based indexing. The beginning index is inclusive and - the end index is exclusive. - properties: - begin: - type: optional - docs: The index of the first character in the text segment, inclusive. - validation: - min: 0 - end: - type: optional - docs: The index of the last character in the text segment, exclusive. - validation: - min: 0 - source: - openapi: streaming-asyncapi.yml - SentimentItem: - properties: - name: - type: optional - docs: Level of sentiment, ranging from 1 (negative) to 9 (positive) - score: - type: optional - docs: Prediction for this level of sentiment - source: - openapi: streaming-asyncapi.yml - Sentiment: - docs: >- - Sentiment predictions returned as a distribution. 
This model predicts the - probability that a given text could be interpreted as having each - sentiment level from 1 (negative) to 9 (positive). - - - Compared to returning one estimate of sentiment, this enables a more - nuanced analysis of a text's meaning. For example, a text with very - neutral sentiment would have an average rating of 5. But also a text that - could be interpreted as having very positive sentiment or very negative - sentiment would also have an average rating of 5. The average sentiment is - less informative than the distribution over sentiment, so this API returns - a value for each sentiment level. - type: list - ToxicityItem: - properties: - name: - type: optional - docs: Category of toxicity. - score: - type: optional - docs: Prediction for this category of toxicity - source: - openapi: streaming-asyncapi.yml - Toxicity: - docs: >- - Toxicity predictions returned as probabilities that the text can be - classified into the following categories: toxic, severe_toxic, obscene, - threat, insult, and identity_hate. - type: list diff --git a/.mock/definition/expression-measurement/stream/stream.yml b/.mock/definition/expression-measurement/stream/stream.yml deleted file mode 100644 index d9c46dc8..00000000 --- a/.mock/definition/expression-measurement/stream/stream.yml +++ /dev/null @@ -1,437 +0,0 @@ -channel: - path: /models - url: stream - auth: false - headers: - X-Hume-Api-Key: - type: string - name: humeApiKey - messages: - publish: - origin: client - body: - type: StreamModelsEndpointPayload - docs: Models endpoint payload - subscribe: - origin: server - body: SubscribeEvent - examples: - - messages: - - type: publish - body: {} - - type: subscribe - body: {} -types: - StreamFace: - docs: > - Configuration for the facial expression emotion model. - - - Note: Using the `reset_stream` parameter does not have any effect on face - identification. A single face identifier cache is maintained over a full - session whether `reset_stream` is used or not. - properties: - facs: - type: optional> - docs: >- - Configuration for FACS predictions. If missing or null, no FACS - predictions will be generated. - descriptions: - type: optional> - docs: >- - Configuration for Descriptions predictions. If missing or null, no - Descriptions predictions will be generated. - identify_faces: - type: optional - docs: > - Whether to return identifiers for faces across frames. If true, unique - identifiers will be assigned to face bounding boxes to differentiate - different faces. If false, all faces will be tagged with an "unknown" - ID. - default: false - fps_pred: - type: optional - docs: > - Number of frames per second to process. Other frames will be omitted - from the response. - default: 3 - prob_threshold: - type: optional - docs: > - Face detection probability threshold. Faces detected with a - probability less than this threshold will be omitted from the - response. - default: 3 - min_face_size: - type: optional - docs: > - Minimum bounding box side length in pixels to treat as a face. Faces - detected with a bounding box side length in pixels less than this - threshold will be omitted from the response. - default: 3 - source: - openapi: streaming-asyncapi.yml - inline: true - StreamLanguage: - docs: Configuration for the language emotion model. - properties: - sentiment: - type: optional> - docs: >- - Configuration for sentiment predictions. If missing or null, no - sentiment predictions will be generated. 
- toxicity: - type: optional> - docs: >- - Configuration for toxicity predictions. If missing or null, no - toxicity predictions will be generated. - granularity: - type: optional - docs: >- - The granularity at which to generate predictions. Values are `word`, - `sentence`, `utterance`, or `passage`. To get a single prediction for - the entire text of your streaming payload use `passage`. Default value - is `word`. - source: - openapi: streaming-asyncapi.yml - inline: true - Config: - docs: > - Configuration used to specify which models should be used and with what - settings. - properties: - burst: - type: optional> - docs: | - Configuration for the vocal burst emotion model. - - Note: Model configuration is not currently available in streaming. - - Please use the default configuration by passing an empty object `{}`. - face: - type: optional - docs: > - Configuration for the facial expression emotion model. - - - Note: Using the `reset_stream` parameter does not have any effect on - face identification. A single face identifier cache is maintained over - a full session whether `reset_stream` is used or not. - facemesh: - type: optional> - docs: | - Configuration for the facemesh emotion model. - - Note: Model configuration is not currently available in streaming. - - Please use the default configuration by passing an empty object `{}`. - language: - type: optional - docs: Configuration for the language emotion model. - prosody: - type: optional> - docs: | - Configuration for the speech prosody emotion model. - - Note: Model configuration is not currently available in streaming. - - Please use the default configuration by passing an empty object `{}`. - source: - openapi: streaming-asyncapi.yml - inline: true - StreamModelsEndpointPayload: - docs: Models endpoint payload - properties: - data: optional - models: - type: optional - docs: > - Configuration used to specify which models should be used and with - what settings. - stream_window_ms: - type: optional - docs: > - Length in milliseconds of streaming sliding window. - - - Extending the length of this window will prepend media context from - past payloads into the current payload. - - - For example, if on the first payload you send 500ms of data and on the - second payload you send an additional 500ms of data, a window of at - least 1000ms will allow the model to process all 1000ms of stream - data. - - - A window of 600ms would append the full 500ms of the second payload to - the last 100ms of the first payload. - - - Note: This feature is currently only supported for audio data and - audio models. For other file types and models this parameter will be - ignored. - default: 5000 - validation: - min: 500 - max: 10000 - reset_stream: - type: optional - docs: > - Whether to reset the streaming sliding window before processing the - current payload. - - - If this parameter is set to `true` then past context will be deleted - before processing the current payload. - - - Use reset_stream when one audio file is done being processed and you - do not want context to leak across files. - default: false - raw_text: - type: optional - docs: > - Set to `true` to enable the data parameter to be parsed as raw text - rather than base64 encoded bytes. - - This parameter is useful if you want to send text to be processed by - the language model, but it cannot be used with other file types like - audio, image, or video. - default: false - job_details: - type: optional - docs: > - Set to `true` to get details about the job. 
- - - This parameter can be set in the same payload as data or it can be set - without data and models configuration to get the job details between - payloads. - - - This parameter is useful to get the unique job ID. - default: false - payload_id: - type: optional - docs: > - Pass an arbitrary string as the payload ID and get it back at the top - level of the socket response. - - - This can be useful if you have multiple requests running - asynchronously and want to disambiguate responses as they are - received. - source: - openapi: streaming-asyncapi.yml - StreamModelPredictionsJobDetails: - docs: > - If the job_details flag was set in the request, details about the current - streaming job will be returned in the response body. - properties: - job_id: - type: optional - docs: ID of the current streaming job. - source: - openapi: streaming-asyncapi.yml - inline: true - StreamModelPredictionsBurstPredictionsItem: - properties: - time: optional - emotions: optional - source: - openapi: streaming-asyncapi.yml - inline: true - StreamModelPredictionsBurst: - docs: Response for the vocal burst emotion model. - properties: - predictions: optional> - source: - openapi: streaming-asyncapi.yml - inline: true - StreamModelPredictionsFacePredictionsItem: - properties: - frame: - type: optional - docs: Frame number - time: - type: optional - docs: Time in seconds when face detection occurred. - bbox: optional - prob: - type: optional - docs: The predicted probability that a detected face was actually a face. - face_id: - type: optional - docs: >- - Identifier for a face. Not that this defaults to `unknown` unless face - identification is enabled in the face model configuration. - emotions: optional - facs: optional - descriptions: optional - source: - openapi: streaming-asyncapi.yml - inline: true - StreamModelPredictionsFace: - docs: Response for the facial expression emotion model. - properties: - predictions: optional> - source: - openapi: streaming-asyncapi.yml - inline: true - StreamModelPredictionsFacemeshPredictionsItem: - properties: - emotions: optional - source: - openapi: streaming-asyncapi.yml - inline: true - StreamModelPredictionsFacemesh: - docs: Response for the facemesh emotion model. - properties: - predictions: optional> - source: - openapi: streaming-asyncapi.yml - inline: true - StreamModelPredictionsLanguagePredictionsItem: - properties: - text: - type: optional - docs: A segment of text (like a word or a sentence). - position: optional - emotions: optional - sentiment: optional - toxicity: optional - source: - openapi: streaming-asyncapi.yml - inline: true - StreamModelPredictionsLanguage: - docs: Response for the language emotion model. - properties: - predictions: optional> - source: - openapi: streaming-asyncapi.yml - inline: true - StreamModelPredictionsProsodyPredictionsItem: - properties: - time: optional - emotions: optional - source: - openapi: streaming-asyncapi.yml - inline: true - StreamModelPredictionsProsody: - docs: Response for the speech prosody emotion model. - properties: - predictions: optional> - source: - openapi: streaming-asyncapi.yml - inline: true - StreamModelPredictions: - docs: Model predictions - properties: - payload_id: - type: optional - docs: > - If a payload ID was passed in the request, the same payload ID will be - sent back in the response body. - job_details: - type: optional - docs: > - If the job_details flag was set in the request, details about the - current streaming job will be returned in the response body. 
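The `StreamModelsEndpointPayload` above is what a client sends over the `/models` streaming channel: base64 (or raw-text) `data`, a `models` configuration, and the sliding-window, reset, and job-details flags. A hedged sketch of building one payload; the full WebSocket URL is not shown in these lines, and authentication uses the `X-Hume-Api-Key` header declared on the channel above.

```python
import base64
import json

with open("clip.wav", "rb") as f:
    encoded = base64.b64encode(f.read()).decode("utf-8")

payload = {
    "data": encoded,
    "models": {"prosody": {}},    # empty object = default model configuration
    "stream_window_ms": 5000,     # keep up to 5s of prior audio as context (500-10000)
    "reset_stream": False,        # set True when switching to an unrelated file
    "raw_text": False,            # True only when "data" is plain text for the language model
    "job_details": True,          # ask for the streaming job ID in the response
    "payload_id": "clip-wav-001", # echoed back to correlate asynchronous responses
}

# Send as a single text frame over a WebSocket connected to the /models channel;
# the wss:// URL itself is not part of the definition shown here.
message = json.dumps(payload)
```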
- burst: - type: optional - docs: Response for the vocal burst emotion model. - face: - type: optional - docs: Response for the facial expression emotion model. - facemesh: - type: optional - docs: Response for the facemesh emotion model. - language: - type: optional - docs: Response for the language emotion model. - prosody: - type: optional - docs: Response for the speech prosody emotion model. - source: - openapi: streaming-asyncapi.yml - inline: true - JobDetails: - docs: > - If the job_details flag was set in the request, details about the current - streaming job will be returned in the response body. - properties: - job_id: - type: optional - docs: ID of the current streaming job. - source: - openapi: streaming-asyncapi.yml - inline: true - StreamErrorMessage: - docs: Error message - properties: - error: - type: optional - docs: Error message text. - code: - type: optional - docs: Unique identifier for the error. - payload_id: - type: optional - docs: > - If a payload ID was passed in the request, the same payload ID will be - sent back in the response body. - job_details: - type: optional - docs: > - If the job_details flag was set in the request, details about the - current streaming job will be returned in the response body. - source: - openapi: streaming-asyncapi.yml - inline: true - StreamWarningMessageJobDetails: - docs: > - If the job_details flag was set in the request, details about the current - streaming job will be returned in the response body. - properties: - job_id: - type: optional - docs: ID of the current streaming job. - source: - openapi: streaming-asyncapi.yml - inline: true - StreamWarningMessage: - docs: Warning message - properties: - warning: - type: optional - docs: Warning message text. - code: - type: optional - docs: Unique identifier for the error. - payload_id: - type: optional - docs: > - If a payload ID was passed in the request, the same payload ID will be - sent back in the response body. - job_details: - type: optional - docs: > - If the job_details flag was set in the request, details about the - current streaming job will be returned in the response body. - source: - openapi: streaming-asyncapi.yml - inline: true - SubscribeEvent: - discriminated: false - union: - - type: StreamModelPredictions - docs: Model predictions - - type: StreamErrorMessage - docs: Error message - - type: StreamWarningMessage - docs: Warning message - source: - openapi: streaming-asyncapi.yml -imports: - streamRoot: __package__.yml diff --git a/.mock/definition/tts/__package__.yml b/.mock/definition/tts/__package__.yml deleted file mode 100644 index 53427e35..00000000 --- a/.mock/definition/tts/__package__.yml +++ /dev/null @@ -1,928 +0,0 @@ -errors: - UnprocessableEntityError: - status-code: 422 - type: HTTPValidationError - docs: Validation Error - examples: - - value: {} - BadRequestError: - status-code: 400 - type: ErrorResponse - docs: Bad Request - examples: - - value: {} -service: - auth: false - base-path: '' - endpoints: - synthesize-json: - path: /v0/tts - method: POST - docs: >- - Synthesizes one or more input texts into speech using the specified - voice. If no voice is provided, a novel voice will be generated - dynamically. Optionally, additional context can be included to influence - the speech's style and prosody. - - - The response includes the base64-encoded audio and metadata in JSON - format. 
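`SubscribeEvent` above is an undiscriminated union of model predictions, an error message, and a warning message, so a client has to tell them apart by which keys are present. A small sketch under that assumption; the handler name is hypothetical.

```python
import json

def handle_subscribe_event(frame: str) -> None:
    """Dispatch one server frame from the /models stream (shape per SubscribeEvent above)."""
    event = json.loads(frame)
    if "error" in event:
        print(f"error {event.get('code')}: {event['error']}")
    elif "warning" in event:
        print(f"warning {event.get('code')}: {event['warning']}")
    else:
        # StreamModelPredictions: one optional block per requested model.
        for prediction in event.get("prosody", {}).get("predictions", []):
            top = max(prediction.get("emotions", []), key=lambda e: e["score"], default=None)
            if top:
                print(f"{prediction.get('time')}: {top['name']} ({top['score']:.2f})")
```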
- source: - openapi: tts-openapi.json - display-name: Text-to-Speech (Json) - request: - body: - type: PostedTts - content-type: application/json - response: - docs: Successful Response - type: ReturnTts - status-code: 200 - errors: - - UnprocessableEntityError - examples: - - request: - context: - utterances: - - text: How can people see beauty so differently? - description: >- - A curious student with a clear and respectful tone, seeking - clarification on Hume's ideas with a straightforward - question. - format: - type: mp3 - num_generations: 1 - utterances: - - text: >- - Beauty is no quality in things themselves: It exists merely in - the mind which contemplates them. - description: >- - Middle-aged masculine voice with a clear, rhythmic Scots lilt, - rounded vowels, and a warm, steady tone with an articulate, - academic quality. - response: - body: - generations: - - audio: //PExAA0DDYRvkpNfhv3JI5JZ...etc. - duration: 7.44225 - encoding: - format: mp3 - sample_rate: 48000 - file_size: 120192 - generation_id: 795c949a-1510-4a80-9646-7d0863b023ab - snippets: - - - audio: //PExAA0DDYRvkpNfhv3JI5JZ...etc. - generation_id: 795c949a-1510-4a80-9646-7d0863b023ab - id: 37b1b1b1-1b1b-1b1b-1b1b-1b1b1b1b1b1b - text: >- - Beauty is no quality in things themselves: It exists - merely in the mind which contemplates them. - utterance_index: 0 - timestamps: [] - request_id: 66e01f90-4501-4aa0-bbaf-74f45dc15aa725906 - synthesize-file: - path: /v0/tts/file - method: POST - docs: >- - Synthesizes one or more input texts into speech using the specified - voice. If no voice is provided, a novel voice will be generated - dynamically. Optionally, additional context can be included to influence - the speech's style and prosody. - - - The response contains the generated audio file in the requested format. - source: - openapi: tts-openapi.json - display-name: Text-to-Speech (File) - request: - body: - type: PostedTts - content-type: application/json - response: - docs: OK - type: file - status-code: 200 - errors: - - UnprocessableEntityError - examples: - - request: - context: - generation_id: 09ad914d-8e7f-40f8-a279-e34f07f7dab2 - format: - type: mp3 - num_generations: 1 - utterances: - - text: >- - Beauty is no quality in things themselves: It exists merely in - the mind which contemplates them. - description: >- - Middle-aged masculine voice with a clear, rhythmic Scots lilt, - rounded vowels, and a warm, steady tone with an articulate, - academic quality. - synthesize-file-streaming: - path: /v0/tts/stream/file - method: POST - docs: >- - Streams synthesized speech using the specified voice. If no voice is - provided, a novel voice will be generated dynamically. Optionally, - additional context can be included to influence the speech's style and - prosody. - source: - openapi: tts-openapi.json - display-name: Text-to-Speech (Streamed File) - request: - body: - type: PostedTts - content-type: application/json - response: - docs: OK - type: file - status-code: 200 - errors: - - UnprocessableEntityError - examples: - - request: - utterances: - - text: >- - Beauty is no quality in things themselves: It exists merely in - the mind which contemplates them. - voice: - name: Male English Actor - provider: HUME_AI - synthesize-json-streaming: - path: /v0/tts/stream/json - method: POST - docs: >- - Streams synthesized speech using the specified voice. If no voice is - provided, a novel voice will be generated dynamically. Optionally, - additional context can be included to influence the speech's style and - prosody. 
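The `synthesize-json` endpoint above takes a `PostedTts` body and returns base64-encoded audio plus metadata. A hedged sketch that mirrors the example request in the definition and writes the first generation to disk; the API-key header is the one declared elsewhere in these definitions.

```python
import base64
import requests

body = {
    "utterances": [{
        "text": "Beauty is no quality in things themselves: It exists merely in the mind which contemplates them.",
        "description": "Middle-aged masculine voice with a clear, rhythmic Scots lilt.",
    }],
    "format": {"type": "mp3"},
    "num_generations": 1,
}

resp = requests.post(
    "https://api.hume.ai/v0/tts",
    headers={"X-Hume-Api-Key": "<your-api-key>"},
    json=body,
)
resp.raise_for_status()
generation = resp.json()["generations"][0]
with open("output.mp3", "wb") as f:
    f.write(base64.b64decode(generation["audio"]))   # audio is base64 in the JSON response
print(generation["generation_id"], generation["duration"])
```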
- - - The response is a stream of JSON objects including audio encoded in - base64. - source: - openapi: tts-openapi.json - display-name: Text-to-Speech (Streamed JSON) - request: - body: - type: PostedTts - content-type: application/json - response-stream: - docs: Successful Response - type: TtsOutput - format: json - errors: - - UnprocessableEntityError - examples: - - request: - utterances: - - text: >- - Beauty is no quality in things themselves: It exists merely in - the mind which contemplates them. - voice: - name: Male English Actor - provider: HUME_AI - convertVoiceFile: - path: /v0/tts/voice_conversion/file - method: POST - source: - openapi: tts-openapi.json - display-name: Voice Conversion (Streamed File) - request: - name: ConvertVoiceFileRequest - body: - properties: - strip_headers: - type: optional - docs: >- - If enabled, the audio for all the chunks of a generation, once - concatenated together, will constitute a single audio file. - Otherwise, if disabled, each chunk's audio will be its own audio - file, each with its own headers (if applicable). - audio: file - context: - type: optional - docs: >- - Utterances to use as context for generating consistent speech - style and prosody across multiple requests. These will not be - converted to speech output. - voice: optional - format: - type: optional - docs: Specifies the output audio file format. - include_timestamp_types: - type: optional> - docs: The set of timestamp types to include in the response. - content-type: multipart/form-data - response: - docs: Successful Response - type: file - status-code: 200 - errors: - - UnprocessableEntityError - convertVoiceJson: - path: /v0/tts/voice_conversion/json - method: POST - source: - openapi: tts-openapi.json - display-name: Voice Conversion (Streamed JSON) - request: - name: ConvertVoiceJsonRequest - body: - properties: - strip_headers: - type: optional - docs: >- - If enabled, the audio for all the chunks of a generation, once - concatenated together, will constitute a single audio file. - Otherwise, if disabled, each chunk's audio will be its own audio - file, each with its own headers (if applicable). - audio: optional - context: - type: optional - docs: >- - Utterances to use as context for generating consistent speech - style and prosody across multiple requests. These will not be - converted to speech output. - voice: optional - format: - type: optional - docs: Specifies the output audio file format. - include_timestamp_types: - type: optional> - docs: The set of timestamp types to include in the response. - content-type: multipart/form-data - response-stream: - docs: Successful Response - type: TtsOutput - format: json - errors: - - UnprocessableEntityError - examples: - - request: {} - response: - stream: - - audio: audio - audio_format: mp3 - chunk_index: 1 - generation_id: generation_id - is_last_chunk: true - request_id: request_id - snippet: - audio: audio - generation_id: generation_id - id: id - text: text - timestamps: - - text: text - time: - begin: 1 - end: 1 - type: word - transcribed_text: transcribed_text - utterance_index: 1 - snippet_id: snippet_id - text: text - transcribed_text: transcribed_text - type: audio - utterance_index: 1 - source: - openapi: tts-openapi.json -types: - PostedContext: - discriminated: false - docs: >- - Utterances to use as context for generating consistent speech style and - prosody across multiple requests. These will not be converted to speech - output. 
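The streamed-JSON endpoint above returns a stream of `TtsOutput` objects (audio chunks and, for Octave 2 requests, timestamp messages). A hedged sketch of consuming it; the assumption that each object arrives as one JSON document per line is mine and is not stated in the lines above.

```python
import base64
import json
import requests

body = {
    "utterances": [{
        "text": "Beauty is no quality in things themselves.",
        "voice": {"name": "Male English Actor", "provider": "HUME_AI"},
    }],
}

with requests.post(
    "https://api.hume.ai/v0/tts/stream/json",
    headers={"X-Hume-Api-Key": "<your-api-key>"},
    json=body,
    stream=True,
) as resp, open("streamed.mp3", "wb") as out:
    # Assumption: one TtsOutput JSON document per line of the response body.
    for line in resp.iter_lines():
        if not line:
            continue
        chunk = json.loads(line)
        if chunk.get("type") == "audio":          # SnippetAudioChunk
            out.write(base64.b64decode(chunk["audio"]))
        elif chunk.get("type") == "timestamp":    # TimestampMessage (Octave 2 only)
            print(chunk["timestamp"]["text"], chunk["timestamp"]["time"])
```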
- union: - - type: PostedContextWithGenerationId - - type: PostedContextWithUtterances - source: - openapi: tts-openapi.json - inline: true - Format: - discriminated: false - docs: Specifies the output audio file format. - union: - - type: FormatMp3 - - type: FormatPcm - - type: FormatWav - source: - openapi: tts-openapi.json - inline: true - AudioFormatType: - enum: - - mp3 - - pcm - - wav - source: - openapi: tts-openapi.json - PublishTts: - docs: Input message type for the TTS stream. - properties: - close: - type: optional - docs: Force the generation of audio and close the stream. - default: false - description: - type: optional - docs: >- - Natural language instructions describing how the text should be spoken - by the model (e.g., `"a soft, gentle voice with a strong British - accent"`). - validation: - maxLength: 1000 - flush: - type: optional - docs: >- - Force the generation of audio regardless of how much text has been - supplied. - default: false - speed: - type: optional - docs: A relative measure of how fast this utterance should be spoken. - default: 1 - validation: - min: 0.25 - max: 3 - text: - type: optional - docs: The input text to be converted to speech output. - default: '' - validation: - maxLength: 5000 - trailing_silence: - type: optional - docs: Duration of trailing silence (in seconds) to add to this utterance - default: 0 - validation: - min: 0 - max: 5 - voice: - type: optional - docs: >- - The name or ID of the voice from the `Voice Library` to be used as the - speaker for this and all subsequent utterances, until the `"voice"` - field is updated again. - source: - openapi: tts-asyncapi.json - MillisecondInterval: - properties: - begin: - type: integer - docs: Start time of the interval in milliseconds. - end: - type: integer - docs: End time of the interval in milliseconds. - source: - openapi: tts-openapi.json - TimestampMessage: - docs: A word or phoneme level timestamp for the generated audio. - properties: - generation_id: - type: string - docs: >- - The generation ID of the parent snippet that this chunk corresponds - to. - request_id: - type: string - docs: ID of the initiating request. - snippet_id: - type: string - docs: The ID of the parent snippet that this chunk corresponds to. - timestamp: - type: Timestamp - docs: A word or phoneme level timestamp for the generated audio. - type: literal<"timestamp"> - source: - openapi: tts-openapi.json - SnippetAudioChunk: - docs: Metadata for a chunk of generated audio. - properties: - audio: - type: string - docs: The generated audio output chunk in the requested format. - audio_format: - type: AudioFormatType - docs: The generated audio output format. - chunk_index: - type: integer - docs: The index of the audio chunk in the snippet. - generation_id: - type: string - docs: >- - The generation ID of the parent snippet that this chunk corresponds - to. - is_last_chunk: - type: boolean - docs: >- - Whether or not this is the last chunk streamed back from the decoder - for one input snippet. - request_id: - type: string - docs: ID of the initiating request. - snippet: optional - snippet_id: - type: string - docs: The ID of the parent snippet that this chunk corresponds to. - text: - type: string - docs: The text of the parent snippet that this chunk corresponds to. - transcribed_text: - type: optional - docs: >- - The transcribed text of the generated audio of the parent snippet that - this chunk corresponds to. It is only present if `instant_mode` is set - to `false`. 
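`PublishTts` above is the input message type for the bidirectional TTS stream, combining `text`, `voice`, `description`, `speed`, and `trailing_silence` with the `flush`/`close` controls. A hedged sketch of the frames a client would send; the stream-input WebSocket URL itself is not shown in these lines.

```python
import json

# Frames a client might send over the TTS input stream, per PublishTts above.
frames = [
    json.dumps({
        "text": "Welcome back to the show.",
        "voice": "Male English Actor",      # name or ID of a voice from the Voice Library
        "description": "a warm, unhurried radio host",
        "speed": 1.1,                       # 0.25-3.0, relative speaking rate
        "trailing_silence": 0.5,            # seconds of silence appended to this utterance
    }),
    json.dumps({"flush": True}),            # force generation of audio for buffered text
    json.dumps({"close": True}),            # generate remaining audio and close the stream
]
```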
- type: literal<"audio"> - utterance_index: - type: optional - docs: >- - The index of the utterance in the request that the parent snippet of - this chunk corresponds to. - source: - openapi: tts-openapi.json - Timestamp: - properties: - text: - type: string - docs: The word or phoneme text that the timestamp corresponds to. - time: - type: MillisecondInterval - docs: The start and end timestamps for the word or phoneme in milliseconds. - type: - type: TimestampType - source: - openapi: tts-openapi.json - TimestampType: - enum: - - word - - phoneme - source: - openapi: tts-openapi.json - PostedUtteranceVoiceWithId: - properties: - id: - type: string - docs: The unique ID associated with the **Voice**. - provider: - type: optional - docs: >- - Specifies the source provider associated with the chosen voice. - - - - **`HUME_AI`**: Select voices from Hume's [Voice - Library](https://platform.hume.ai/tts/voice-library), containing a - variety of preset, shared voices. - - - **`CUSTOM_VOICE`**: Select from voices you've personally generated - and saved in your account. - - - If no provider is explicitly set, the default provider is - `CUSTOM_VOICE`. When using voices from Hume's **Voice Library**, you - must explicitly set the provider to `HUME_AI`. - - - Preset voices from Hume's **Voice Library** are accessible by all - users. In contrast, your custom voices are private and accessible only - via requests authenticated with your API key. - source: - openapi: tts-openapi.json - PostedUtteranceVoiceWithName: - properties: - name: - type: string - docs: The name of a **Voice**. - provider: - type: optional - docs: >- - Specifies the source provider associated with the chosen voice. - - - - **`HUME_AI`**: Select voices from Hume's [Voice - Library](https://platform.hume.ai/tts/voice-library), containing a - variety of preset, shared voices. - - - **`CUSTOM_VOICE`**: Select from voices you've personally generated - and saved in your account. - - - If no provider is explicitly set, the default provider is - `CUSTOM_VOICE`. When using voices from Hume's **Voice Library**, you - must explicitly set the provider to `HUME_AI`. - - - Preset voices from Hume's **Voice Library** are accessible by all - users. In contrast, your custom voices are private and accessible only - via requests authenticated with your API key. - source: - openapi: tts-openapi.json - VoiceProvider: - enum: - - HUME_AI - - CUSTOM_VOICE - source: - openapi: tts-openapi.json - PostedUtteranceVoice: - discriminated: false - union: - - type: PostedUtteranceVoiceWithId - - type: PostedUtteranceVoiceWithName - source: - openapi: tts-openapi.json - OctaveVersion: - enum: - - value: '1' - name: One - - value: '2' - name: Two - docs: >- - Selects the Octave model version used to synthesize speech for this - request. If you omit this field, Hume automatically routes the request to - the most appropriate model. Setting a specific version ensures stable and - repeatable behavior across requests. - - - Use `2` to opt into the latest Octave capabilities. When you specify - version `2`, you must also provide a `voice`. Requests that set `version: - 2` without a voice will be rejected. - - - For a comparison of Octave versions, see the [Octave - versions](/docs/text-to-speech-tts/overview#octave-versions) section in - the TTS overview. 
- source: - openapi: tts-openapi.json - TtsOutput: - discriminated: false - union: - - type: SnippetAudioChunk - - type: TimestampMessage - source: - openapi: tts-openapi.json - Snippet: - properties: - audio: - type: string - docs: >- - The segmented audio output in the requested format, encoded as a - base64 string. - generation_id: - type: string - docs: The generation ID this snippet corresponds to. - id: - type: string - docs: A unique ID associated with this **Snippet**. - text: - type: string - docs: The text for this **Snippet**. - timestamps: - docs: >- - A list of word or phoneme level timestamps for the generated audio. - Timestamps are only returned for Octave 2 requests. - type: list - transcribed_text: - type: optional - docs: >- - The transcribed text of the generated audio. It is only present if - `instant_mode` is set to `false`. - utterance_index: - type: optional - docs: The index of the utterance in the request this snippet corresponds to. - source: - openapi: tts-openapi.json - PostedContextWithGenerationId: - properties: - generation_id: - type: string - docs: >- - The ID of a prior TTS generation to use as context for generating - consistent speech style and prosody across multiple requests. - Including context may increase audio generation times. - source: - openapi: tts-openapi.json - PostedContextWithUtterances: - properties: - utterances: - type: list - source: - openapi: tts-openapi.json - AudioEncoding: - docs: >- - Encoding information about the generated audio, including the `format` and - `sample_rate`. - properties: - format: - type: AudioFormatType - docs: Format for the output audio. - sample_rate: - type: integer - docs: >- - The sample rate (`Hz`) of the generated audio. The default sample rate - is `48000 Hz`. - source: - openapi: tts-openapi.json - ErrorResponse: - properties: - code: optional - error: optional - message: optional - source: - openapi: tts-openapi.json - ReturnGeneration: - properties: - audio: - type: string - docs: >- - The generated audio output in the requested format, encoded as a - base64 string. - duration: - type: double - docs: Duration of the generated audio in seconds. - encoding: - type: AudioEncoding - file_size: - type: integer - docs: Size of the generated audio in bytes. - generation_id: - type: string - docs: >- - A unique ID associated with this TTS generation that can be used as - context for generating consistent speech style and prosody across - multiple requests. - snippets: - docs: >- - A list of snippet groups where each group corresponds to an utterance - in the request. Each group contains segmented snippets that represent - the original utterance divided into more natural-sounding units - optimized for speech delivery. - type: list> - source: - openapi: tts-openapi.json - HTTPValidationError: - properties: - detail: - type: optional> - source: - openapi: tts-openapi.json - FormatMp3: - properties: - type: literal<"mp3"> - source: - openapi: tts-openapi.json - PostedTts: - properties: - context: - type: optional - docs: >- - Utterances to use as context for generating consistent speech style - and prosody across multiple requests. These will not be converted to - speech output. - format: - type: optional - docs: Specifies the output audio file format. - include_timestamp_types: - type: optional> - docs: >- - The set of timestamp types to include in the response. Only supported - for Octave 2 requests. 
- num_generations: - type: optional - docs: >- - Number of audio generations to produce from the input utterances. - - - Using `num_generations` enables faster processing than issuing - multiple sequential requests. Additionally, specifying - `num_generations` allows prosody continuation across all generations - without repeating context, ensuring each generation sounds slightly - different while maintaining contextual consistency. - default: 1 - validation: - min: 1 - max: 5 - split_utterances: - type: optional - docs: >- - Controls how audio output is segmented in the response. - - - - When **enabled** (`true`), input utterances are automatically split - into natural-sounding speech segments. - - - - When **disabled** (`false`), the response maintains a strict - one-to-one mapping between input utterances and output snippets. - - - This setting affects how the `snippets` array is structured in the - response, which may be important for applications that need to track - the relationship between input text and generated audio segments. When - setting to `false`, avoid including utterances with long `text`, as - this can result in distorted output. - default: true - strip_headers: - type: optional - docs: >- - If enabled, the audio for all the chunks of a generation, once - concatenated together, will constitute a single audio file. Otherwise, - if disabled, each chunk's audio will be its own audio file, each with - its own headers (if applicable). - default: false - utterances: - docs: >- - A list of **Utterances** to be converted to speech output. - - - An **Utterance** is a unit of input for - [Octave](/docs/text-to-speech-tts/overview), and includes input - `text`, an optional `description` to serve as the prompt for how the - speech should be delivered, an optional `voice` specification, and - additional controls to guide delivery for `speed` and - `trailing_silence`. - type: list - version: - type: optional - docs: >- - Selects the Octave model version used to synthesize speech for this - request. If you omit this field, Hume automatically routes the request - to the most appropriate model. Setting a specific version ensures - stable and repeatable behavior across requests. - - - Use `2` to opt into the latest Octave capabilities. When you specify - version `2`, you must also provide a `voice`. Requests that set - `version: 2` without a voice will be rejected. - - - For a comparison of Octave versions, see the [Octave - versions](/docs/text-to-speech-tts/overview#octave-versions) section - in the TTS overview. - instant_mode: - type: optional - docs: >- - Enables ultra-low latency streaming, significantly reducing the time - until the first audio chunk is received. Recommended for real-time - applications requiring immediate audio playback. For further details, - see our documentation on [instant - mode](/docs/text-to-speech-tts/overview#ultra-low-latency-streaming-instant-mode). - - - A - [voice](/reference/text-to-speech-tts/synthesize-json-streaming#request.body.utterances.voice) - must be specified when instant mode is enabled. Dynamic voice - generation is not supported with this mode. - - - Instant mode is only supported for streaming endpoints (e.g., - [/v0/tts/stream/json](/reference/text-to-speech-tts/synthesize-json-streaming), - [/v0/tts/stream/file](/reference/text-to-speech-tts/synthesize-file-streaming)). 
- - - Ensure only a single generation is requested - ([num_generations](/reference/text-to-speech-tts/synthesize-json-streaming#request.body.num_generations) - must be `1` or omitted). - default: true - source: - openapi: tts-openapi.json - ReturnTts: - properties: - generations: - type: list - request_id: - type: optional - docs: >- - A unique ID associated with this request for tracking and - troubleshooting. Use this ID when contacting [support](/support) for - troubleshooting assistance. - source: - openapi: tts-openapi.json - ReturnVoice: - docs: An Octave voice available for text-to-speech - properties: - compatible_octave_models: optional> - id: - type: optional - docs: ID of the voice in the `Voice Library`. - name: - type: optional - docs: Name of the voice in the `Voice Library`. - provider: - type: optional - docs: >- - The provider associated with the created voice. - - - Voices created through this endpoint will always have the provider set - to `CUSTOM_VOICE`, indicating a custom voice stored in your account. - source: - openapi: tts-openapi.json - FormatPcm: - properties: - type: literal<"pcm"> - source: - openapi: tts-openapi.json - PostedUtterance: - properties: - description: - type: optional - docs: >- - Natural language instructions describing how the synthesized speech - should sound, including but not limited to tone, intonation, pacing, - and accent. - - - **This field behaves differently depending on whether a voice is - specified**: - - - **Voice specified**: the description will serve as acting directions - for delivery. Keep directions concise—100 characters or fewer—for best - results. See our guide on [acting - instructions](/docs/text-to-speech-tts/acting-instructions). - - - **Voice not specified**: the description will serve as a voice - prompt for generating a voice. See our [prompting - guide](/docs/text-to-speech-tts/prompting) for design tips. - validation: - maxLength: 1000 - speed: - type: optional - docs: >- - Speed multiplier for the synthesized speech. Extreme values below 0.75 - and above 1.5 may sometimes cause instability to the generated output. - default: 1 - validation: - min: 0.5 - max: 2 - text: - type: string - docs: The input text to be synthesized into speech. - validation: - maxLength: 5000 - trailing_silence: - type: optional - docs: Duration of trailing silence (in seconds) to add to this utterance - default: 0 - validation: - min: 0 - max: 5 - voice: - type: optional - docs: >- - The `name` or `id` associated with a **Voice** from the **Voice - Library** to be used as the speaker for this and all subsequent - `utterances`, until the `voice` field is updated again. - - See our [voices guide](/docs/text-to-speech-tts/voices) for more details on generating and specifying **Voices**. - source: - openapi: tts-openapi.json - ValidationErrorLocItem: - discriminated: false - union: - - string - - integer - source: - openapi: tts-openapi.json - inline: true - ValidationError: - properties: - loc: - type: list - msg: string - type: string - source: - openapi: tts-openapi.json - FormatWav: - properties: - type: literal<"wav"> - source: - openapi: tts-openapi.json - ReturnPagedVoices: - docs: A paginated list Octave voices available for text-to-speech - properties: - page_number: - type: optional - docs: >- - The page number of the returned list. - - - This value corresponds to the `page_number` parameter specified in the - request. Pagination uses zero-based indexing. 
- page_size: - type: optional - docs: >- - The maximum number of items returned per page. - - - This value corresponds to the `page_size` parameter specified in the - request. - total_pages: - type: optional - docs: The total number of pages in the collection. - voices_page: - type: optional> - docs: >- - List of voices returned for the specified `page_number` and - `page_size`. - source: - openapi: tts-openapi.json diff --git a/.mock/definition/tts/streamInput.yml b/.mock/definition/tts/streamInput.yml deleted file mode 100644 index 807536e2..00000000 --- a/.mock/definition/tts/streamInput.yml +++ /dev/null @@ -1,96 +0,0 @@ -imports: - root: __package__.yml -channel: - path: /stream/input - url: tts - auth: false - docs: Generate emotionally expressive speech. - query-parameters: - access_token: - type: optional - default: '' - docs: >- - Access token used for authenticating the client. If not provided, an - `api_key` must be provided to authenticate. - - - The access token is generated using both an API key and a Secret key, - which provides an additional layer of security compared to using just an - API key. - - - For more details, refer to the [Authentication Strategies - Guide](/docs/introduction/api-key#authentication-strategies). - context_generation_id: - type: optional - docs: >- - The ID of a prior TTS generation to use as context for generating - consistent speech style and prosody across multiple requests. Including - context may increase audio generation times. - format_type: - type: optional - docs: The format to be used for audio generation. - include_timestamp_types: - type: optional - allow-multiple: true - docs: The set of timestamp types to include in the response. - instant_mode: - type: optional - default: true - docs: >- - Enables ultra-low latency streaming, significantly reducing the time - until the first audio chunk is received. Recommended for real-time - applications requiring immediate audio playback. For further details, - see our documentation on [instant - mode](/docs/text-to-speech-tts/overview#ultra-low-latency-streaming-instant-mode). - no_binary: - type: optional - default: false - docs: If enabled, no binary websocket messages will be sent to the client. - strip_headers: - type: optional - default: false - docs: >- - If enabled, the audio for all the chunks of a generation, once - concatenated together, will constitute a single audio file. Otherwise, - if disabled, each chunk's audio will be its own audio file, each with - its own headers (if applicable). - version: - type: optional - docs: >- - The version of the Octave Model to use. 1 for the legacy model, 2 for - the new model. - api_key: - type: optional - default: '' - docs: >- - API key used for authenticating the client. If not provided, an - `access_token` must be provided to authenticate. - - - For more details, refer to the [Authentication Strategies - Guide](/docs/introduction/api-key#authentication-strategies). 
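For orientation, the /stream/input channel defined in the removed streamInput.yml above is a websocket that accepts PublishTts messages from the client and returns TtsOutput messages (SnippetAudioChunk or TimestampMessage) from the server. A minimal client sketch, assuming the production endpoint wss://api.hume.ai/v0/tts/stream/input, api_key query-parameter authentication, and the third-party websockets package rather than this generated SDK:

import asyncio
import json

import websockets  # third-party websocket client, not part of this SDK


async def stream_tts() -> None:
    # api_key authenticates the socket; no_binary keeps every frame JSON, per the
    # query parameters in the channel definition above.
    uri = (
        "wss://api.hume.ai/v0/tts/stream/input"
        "?api_key=YOUR_API_KEY&instant_mode=true&no_binary=true"
    )
    async with websockets.connect(uri) as ws:
        # PublishTts: supply text and a voice (placeholder name), then flush to force generation.
        await ws.send(
            json.dumps(
                {
                    "text": "Hello from the stream input channel.",
                    "voice": "YOUR_VOICE_NAME",
                    "flush": True,
                }
            )
        )
        # TtsOutput: JSON messages such as SnippetAudioChunk arrive until is_last_chunk is true.
        while True:
            message = json.loads(await ws.recv())
            print(message.get("type"), message.get("chunk_index"))
            if message.get("is_last_chunk"):
                break


asyncio.run(stream_tts())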
- messages: - publish: - origin: client - body: - type: root.PublishTts - subscribe: - origin: server - body: - type: root.TtsOutput - examples: - - messages: - - type: publish - body: {} - - type: subscribe - body: - audio: audio - audio_format: mp3 - chunk_index: 1 - generation_id: generation_id - is_last_chunk: true - request_id: request_id - snippet_id: snippet_id - text: text - type: audio diff --git a/.mock/definition/tts/voices.yml b/.mock/definition/tts/voices.yml deleted file mode 100644 index 198a700d..00000000 --- a/.mock/definition/tts/voices.yml +++ /dev/null @@ -1,140 +0,0 @@ -imports: - root: __package__.yml -service: - auth: false - base-path: '' - endpoints: - list: - path: /v0/tts/voices - method: GET - docs: >- - Lists voices you have saved in your account, or voices from the [Voice - Library](https://platform.hume.ai/tts/voice-library). - pagination: - offset: $request.page_number - results: $response.voices_page - source: - openapi: tts-openapi.json - display-name: List voices - request: - name: VoicesListRequest - query-parameters: - provider: - type: root.VoiceProvider - docs: >- - Specify the voice provider to filter voices returned by the - endpoint: - - - - **`HUME_AI`**: Lists preset, shared voices from Hume's [Voice - Library](https://platform.hume.ai/tts/voice-library). - - - **`CUSTOM_VOICE`**: Lists custom voices created and saved to - your account. - page_number: - type: optional - default: 0 - docs: >- - Specifies the page number to retrieve, enabling pagination. - - - This parameter uses zero-based indexing. For example, setting - `page_number` to 0 retrieves the first page of results (items 0-9 - if `page_size` is 10), setting `page_number` to 1 retrieves the - second page (items 10-19), and so on. Defaults to 0, which - retrieves the first page. - page_size: - type: optional - docs: >- - Specifies the maximum number of results to include per page, - enabling pagination. The value must be between 1 and 100, - inclusive. - - - For example, if `page_size` is set to 10, each page will include - up to 10 items. Defaults to 10. - ascending_order: optional - response: - docs: Success - type: root.ReturnPagedVoices - status-code: 200 - errors: - - root.BadRequestError - examples: - - query-parameters: - provider: CUSTOM_VOICE - response: - body: - page_number: 0 - page_size: 10 - total_pages: 1 - voices_page: - - id: c42352c0-4566-455d-b180-0f654b65b525 - name: David Hume - provider: CUSTOM_VOICE - - id: d87352b0-26a3-4b11-081b-d157a5674d19 - name: Goliath Hume - provider: CUSTOM_VOICE - create: - path: /v0/tts/voices - method: POST - docs: >- - Saves a new custom voice to your account using the specified TTS - generation ID. - - - Once saved, this voice can be reused in subsequent TTS requests, - ensuring consistent speech style and prosody. For more details on voice - creation, see the [Voices Guide](/docs/text-to-speech-tts/voices). - source: - openapi: tts-openapi.json - display-name: Create voice - request: - name: PostedVoice - body: - properties: - generation_id: - type: string - docs: >- - A unique ID associated with this TTS generation that can be used - as context for generating consistent speech style and prosody - across multiple requests. - name: - type: string - docs: Name of the voice in the `Voice Library`. 
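The voices endpoints in the removed voices.yml above are plain REST calls against the TTS API. A rough sketch of listing saved voices and saving a new custom voice from a prior generation_id, reusing the example values shown in the definition and assuming the production base URL https://api.hume.ai, X-Hume-Api-Key header authentication, and the third-party httpx package rather than this generated SDK:

import httpx

BASE_URL = "https://api.hume.ai"  # assumed production base URL
HEADERS = {"X-Hume-Api-Key": "YOUR_API_KEY"}

# GET /v0/tts/voices: list the first page of custom voices saved to the account.
voices_page = httpx.get(
    f"{BASE_URL}/v0/tts/voices",
    params={"provider": "CUSTOM_VOICE", "page_number": 0, "page_size": 10},
    headers=HEADERS,
).json()
print(voices_page.get("total_pages"))

# POST /v0/tts/voices: save a custom voice from a prior TTS generation_id
# (values taken from the example request in the definition above).
created = httpx.post(
    f"{BASE_URL}/v0/tts/voices",
    json={"generation_id": "795c949a-1510-4a80-9646-7d0863b023ab", "name": "David Hume"},
    headers=HEADERS,
).json()
print(created.get("id"), created.get("provider"))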
- content-type: application/json - response: - docs: Successful Response - type: root.ReturnVoice - status-code: 200 - errors: - - root.UnprocessableEntityError - examples: - - request: - generation_id: 795c949a-1510-4a80-9646-7d0863b023ab - name: David Hume - response: - body: - id: c42352c0-4566-455d-b180-0f654b65b525 - name: David Hume - provider: CUSTOM_VOICE - delete: - path: /v0/tts/voices - method: DELETE - docs: Deletes a previously generated custom voice. - source: - openapi: tts-openapi.json - display-name: Delete voice - request: - name: VoicesDeleteRequest - query-parameters: - name: - type: string - docs: Name of the voice to delete - errors: - - root.BadRequestError - examples: - - query-parameters: - name: David Hume - source: - openapi: tts-openapi.json diff --git a/.mock/fern.config.json b/.mock/fern.config.json deleted file mode 100644 index 904feebc..00000000 --- a/.mock/fern.config.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "organization" : "hume", - "version" : "0.108.0" -} \ No newline at end of file diff --git a/LICENSE b/LICENSE index 00f8516e..79c2611e 100644 --- a/LICENSE +++ b/LICENSE @@ -1,6 +1,6 @@ MIT License -Copyright (c) 2025 Hume. +Copyright (c) 2026 Hume. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/poetry.lock b/poetry.lock index 7d8603a9..659dc75e 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.8.5 and should not be changed by hand. [[package]] name = "aiofiles" @@ -24,23 +24,22 @@ files = [ [[package]] name = "anyio" -version = "4.11.0" +version = "4.12.1" description = "High-level concurrency and networking framework on top of asyncio or Trio" optional = false python-versions = ">=3.9" files = [ - {file = "anyio-4.11.0-py3-none-any.whl", hash = "sha256:0287e96f4d26d4149305414d4e3bc32f0dcd0862365a4bddea19d7a1ec38c4fc"}, - {file = "anyio-4.11.0.tar.gz", hash = "sha256:82a8d0b81e318cc5ce71a5f1f8b5c4e63619620b63141ef8c995fa0db95a57c4"}, + {file = "anyio-4.12.1-py3-none-any.whl", hash = "sha256:d405828884fc140aa80a3c667b8beed277f1dfedec42ba031bd6ac3db606ab6c"}, + {file = "anyio-4.12.1.tar.gz", hash = "sha256:41cfcc3a4c85d3f05c932da7c26d0201ac36f72abd4435ba90d0464a3ffed703"}, ] [package.dependencies] exceptiongroup = {version = ">=1.0.2", markers = "python_version < \"3.11\""} idna = ">=2.8" -sniffio = ">=1.1" typing_extensions = {version = ">=4.5", markers = "python_version < \"3.13\""} [package.extras] -trio = ["trio (>=0.31.0)"] +trio = ["trio (>=0.31.0)", "trio (>=0.32.0)"] [[package]] name = "appnope" @@ -202,17 +201,17 @@ dev = ["backports.zoneinfo", "freezegun (>=1.0,<2.0)", "jinja2 (>=3.0)", "pytest [[package]] name = "beautifulsoup4" -version = "4.14.2" +version = "4.14.3" description = "Screen-scraping library" optional = true python-versions = ">=3.7.0" files = [ - {file = "beautifulsoup4-4.14.2-py3-none-any.whl", hash = "sha256:5ef6fa3a8cbece8488d66985560f97ed091e22bbc4e9c2338508a9d5de6d4515"}, - {file = "beautifulsoup4-4.14.2.tar.gz", hash = "sha256:2a98ab9f944a11acee9cc848508ec28d9228abfd522ef0fad6a02a72e0ded69e"}, + {file = "beautifulsoup4-4.14.3-py3-none-any.whl", hash = "sha256:0918bfe44902e6ad8d57732ba310582e98da931428d231a5ecb9e7c703a735bb"}, + {file = "beautifulsoup4-4.14.3.tar.gz", hash = "sha256:6292b1c5186d356bba669ef9f7f051757099565ad9ada5dd630bd9de5fa7fb86"}, ] 
[package.dependencies] -soupsieve = ">1.2" +soupsieve = ">=1.6.1" typing-extensions = ">=4.0.0" [package.extras] @@ -242,13 +241,13 @@ css = ["tinycss2 (>=1.1.0,<1.5)"] [[package]] name = "certifi" -version = "2025.11.12" +version = "2026.1.4" description = "Python package for providing Mozilla's CA Bundle." optional = false python-versions = ">=3.7" files = [ - {file = "certifi-2025.11.12-py3-none-any.whl", hash = "sha256:97de8790030bbd5c2d96b7ec782fc2f7820ef8dba6db909ccf95449f2d062d4b"}, - {file = "certifi-2025.11.12.tar.gz", hash = "sha256:d8ab5478f2ecd78af242878415affce761ca6bc54a22a27e026d7c25357c3316"}, + {file = "certifi-2026.1.4-py3-none-any.whl", hash = "sha256:9943707519e4add1115f44c2bc244f782c0249876bf51b6599fee1ffbedd685c"}, + {file = "certifi-2026.1.4.tar.gz", hash = "sha256:ac726dd470482006e014ad384921ed6438c457018f4b3d204aea4281258b2120"}, ] [[package]] @@ -632,41 +631,41 @@ toml = ["tomli"] [[package]] name = "debugpy" -version = "1.8.17" +version = "1.8.19" description = "An implementation of the Debug Adapter Protocol for Python" optional = true python-versions = ">=3.8" files = [ - {file = "debugpy-1.8.17-cp310-cp310-macosx_15_0_x86_64.whl", hash = "sha256:c41d2ce8bbaddcc0009cc73f65318eedfa3dbc88a8298081deb05389f1ab5542"}, - {file = "debugpy-1.8.17-cp310-cp310-manylinux_2_34_x86_64.whl", hash = "sha256:1440fd514e1b815edd5861ca394786f90eb24960eb26d6f7200994333b1d79e3"}, - {file = "debugpy-1.8.17-cp310-cp310-win32.whl", hash = "sha256:3a32c0af575749083d7492dc79f6ab69f21b2d2ad4cd977a958a07d5865316e4"}, - {file = "debugpy-1.8.17-cp310-cp310-win_amd64.whl", hash = "sha256:a3aad0537cf4d9c1996434be68c6c9a6d233ac6f76c2a482c7803295b4e4f99a"}, - {file = "debugpy-1.8.17-cp311-cp311-macosx_15_0_universal2.whl", hash = "sha256:d3fce3f0e3de262a3b67e69916d001f3e767661c6e1ee42553009d445d1cd840"}, - {file = "debugpy-1.8.17-cp311-cp311-manylinux_2_34_x86_64.whl", hash = "sha256:c6bdf134457ae0cac6fb68205776be635d31174eeac9541e1d0c062165c6461f"}, - {file = "debugpy-1.8.17-cp311-cp311-win32.whl", hash = "sha256:e79a195f9e059edfe5d8bf6f3749b2599452d3e9380484cd261f6b7cd2c7c4da"}, - {file = "debugpy-1.8.17-cp311-cp311-win_amd64.whl", hash = "sha256:b532282ad4eca958b1b2d7dbcb2b7218e02cb934165859b918e3b6ba7772d3f4"}, - {file = "debugpy-1.8.17-cp312-cp312-macosx_15_0_universal2.whl", hash = "sha256:f14467edef672195c6f6b8e27ce5005313cb5d03c9239059bc7182b60c176e2d"}, - {file = "debugpy-1.8.17-cp312-cp312-manylinux_2_34_x86_64.whl", hash = "sha256:24693179ef9dfa20dca8605905a42b392be56d410c333af82f1c5dff807a64cc"}, - {file = "debugpy-1.8.17-cp312-cp312-win32.whl", hash = "sha256:6a4e9dacf2cbb60d2514ff7b04b4534b0139facbf2abdffe0639ddb6088e59cf"}, - {file = "debugpy-1.8.17-cp312-cp312-win_amd64.whl", hash = "sha256:e8f8f61c518952fb15f74a302e068b48d9c4691768ade433e4adeea961993464"}, - {file = "debugpy-1.8.17-cp313-cp313-macosx_15_0_universal2.whl", hash = "sha256:857c1dd5d70042502aef1c6d1c2801211f3ea7e56f75e9c335f434afb403e464"}, - {file = "debugpy-1.8.17-cp313-cp313-manylinux_2_34_x86_64.whl", hash = "sha256:3bea3b0b12f3946e098cce9b43c3c46e317b567f79570c3f43f0b96d00788088"}, - {file = "debugpy-1.8.17-cp313-cp313-win32.whl", hash = "sha256:e34ee844c2f17b18556b5bbe59e1e2ff4e86a00282d2a46edab73fd7f18f4a83"}, - {file = "debugpy-1.8.17-cp313-cp313-win_amd64.whl", hash = "sha256:6c5cd6f009ad4fca8e33e5238210dc1e5f42db07d4b6ab21ac7ffa904a196420"}, - {file = "debugpy-1.8.17-cp314-cp314-macosx_15_0_universal2.whl", hash = "sha256:045290c010bcd2d82bc97aa2daf6837443cd52f6328592698809b4549babcee1"}, - {file = 
"debugpy-1.8.17-cp314-cp314-manylinux_2_34_x86_64.whl", hash = "sha256:b69b6bd9dba6a03632534cdf67c760625760a215ae289f7489a452af1031fe1f"}, - {file = "debugpy-1.8.17-cp314-cp314-win32.whl", hash = "sha256:5c59b74aa5630f3a5194467100c3b3d1c77898f9ab27e3f7dc5d40fc2f122670"}, - {file = "debugpy-1.8.17-cp314-cp314-win_amd64.whl", hash = "sha256:893cba7bb0f55161de4365584b025f7064e1f88913551bcd23be3260b231429c"}, - {file = "debugpy-1.8.17-cp38-cp38-macosx_15_0_x86_64.whl", hash = "sha256:8deb4e31cd575c9f9370042876e078ca118117c1b5e1f22c32befcfbb6955f0c"}, - {file = "debugpy-1.8.17-cp38-cp38-manylinux_2_34_x86_64.whl", hash = "sha256:b75868b675949a96ab51abc114c7163f40ff0d8f7d6d5fd63f8932fd38e9c6d7"}, - {file = "debugpy-1.8.17-cp38-cp38-win32.whl", hash = "sha256:17e456da14848d618662354e1dccfd5e5fb75deec3d1d48dc0aa0baacda55860"}, - {file = "debugpy-1.8.17-cp38-cp38-win_amd64.whl", hash = "sha256:e851beb536a427b5df8aa7d0c7835b29a13812f41e46292ff80b2ef77327355a"}, - {file = "debugpy-1.8.17-cp39-cp39-macosx_15_0_x86_64.whl", hash = "sha256:f2ac8055a0c4a09b30b931100996ba49ef334c6947e7ae365cdd870416d7513e"}, - {file = "debugpy-1.8.17-cp39-cp39-manylinux_2_34_x86_64.whl", hash = "sha256:eaa85bce251feca8e4c87ce3b954aba84b8c645b90f0e6a515c00394a9f5c0e7"}, - {file = "debugpy-1.8.17-cp39-cp39-win32.whl", hash = "sha256:b13eea5587e44f27f6c48588b5ad56dcb74a4f3a5f89250443c94587f3eb2ea1"}, - {file = "debugpy-1.8.17-cp39-cp39-win_amd64.whl", hash = "sha256:bb1bbf92317e1f35afcf3ef0450219efb3afe00be79d8664b250ac0933b9015f"}, - {file = "debugpy-1.8.17-py2.py3-none-any.whl", hash = "sha256:60c7dca6571efe660ccb7a9508d73ca14b8796c4ed484c2002abba714226cfef"}, - {file = "debugpy-1.8.17.tar.gz", hash = "sha256:fd723b47a8c08892b1a16b2c6239a8b96637c62a59b94bb5dab4bac592a58a8e"}, + {file = "debugpy-1.8.19-cp310-cp310-macosx_15_0_x86_64.whl", hash = "sha256:fce6da15d73be5935b4438435c53adb512326a3e11e4f90793ea87cd9f018254"}, + {file = "debugpy-1.8.19-cp310-cp310-manylinux_2_34_x86_64.whl", hash = "sha256:e24b1652a1df1ab04d81e7ead446a91c226de704ff5dde6bd0a0dbaab07aa3f2"}, + {file = "debugpy-1.8.19-cp310-cp310-win32.whl", hash = "sha256:327cb28c3ad9e17bc925efc7f7018195fd4787c2fe4b7af1eec11f1d19bdec62"}, + {file = "debugpy-1.8.19-cp310-cp310-win_amd64.whl", hash = "sha256:b7dd275cf2c99e53adb9654f5ae015f70415bbe2bacbe24cfee30d54b6aa03c5"}, + {file = "debugpy-1.8.19-cp311-cp311-macosx_15_0_universal2.whl", hash = "sha256:c5dcfa21de1f735a4f7ced4556339a109aa0f618d366ede9da0a3600f2516d8b"}, + {file = "debugpy-1.8.19-cp311-cp311-manylinux_2_34_x86_64.whl", hash = "sha256:806d6800246244004625d5222d7765874ab2d22f3ba5f615416cf1342d61c488"}, + {file = "debugpy-1.8.19-cp311-cp311-win32.whl", hash = "sha256:783a519e6dfb1f3cd773a9bda592f4887a65040cb0c7bd38dde410f4e53c40d4"}, + {file = "debugpy-1.8.19-cp311-cp311-win_amd64.whl", hash = "sha256:14035cbdbb1fe4b642babcdcb5935c2da3b1067ac211c5c5a8fdc0bb31adbcaa"}, + {file = "debugpy-1.8.19-cp312-cp312-macosx_15_0_universal2.whl", hash = "sha256:bccb1540a49cde77edc7ce7d9d075c1dbeb2414751bc0048c7a11e1b597a4c2e"}, + {file = "debugpy-1.8.19-cp312-cp312-manylinux_2_34_x86_64.whl", hash = "sha256:e9c68d9a382ec754dc05ed1d1b4ed5bd824b9f7c1a8cd1083adb84b3c93501de"}, + {file = "debugpy-1.8.19-cp312-cp312-win32.whl", hash = "sha256:6599cab8a783d1496ae9984c52cb13b7c4a3bd06a8e6c33446832a5d97ce0bee"}, + {file = "debugpy-1.8.19-cp312-cp312-win_amd64.whl", hash = "sha256:66e3d2fd8f2035a8f111eb127fa508469dfa40928a89b460b41fd988684dc83d"}, + {file = "debugpy-1.8.19-cp313-cp313-macosx_15_0_universal2.whl", hash = 
"sha256:91e35db2672a0abaf325f4868fcac9c1674a0d9ad9bb8a8c849c03a5ebba3e6d"}, + {file = "debugpy-1.8.19-cp313-cp313-manylinux_2_34_x86_64.whl", hash = "sha256:85016a73ab84dea1c1f1dcd88ec692993bcbe4532d1b49ecb5f3c688ae50c606"}, + {file = "debugpy-1.8.19-cp313-cp313-win32.whl", hash = "sha256:b605f17e89ba0ecee994391194285fada89cee111cfcd29d6f2ee11cbdc40976"}, + {file = "debugpy-1.8.19-cp313-cp313-win_amd64.whl", hash = "sha256:c30639998a9f9cd9699b4b621942c0179a6527f083c72351f95c6ab1728d5b73"}, + {file = "debugpy-1.8.19-cp314-cp314-macosx_15_0_universal2.whl", hash = "sha256:1e8c4d1bd230067bf1bbcdbd6032e5a57068638eb28b9153d008ecde288152af"}, + {file = "debugpy-1.8.19-cp314-cp314-manylinux_2_34_x86_64.whl", hash = "sha256:d40c016c1f538dbf1762936e3aeb43a89b965069d9f60f9e39d35d9d25e6b809"}, + {file = "debugpy-1.8.19-cp314-cp314-win32.whl", hash = "sha256:0601708223fe1cd0e27c6cce67a899d92c7d68e73690211e6788a4b0e1903f5b"}, + {file = "debugpy-1.8.19-cp314-cp314-win_amd64.whl", hash = "sha256:8e19a725f5d486f20e53a1dde2ab8bb2c9607c40c00a42ab646def962b41125f"}, + {file = "debugpy-1.8.19-cp38-cp38-macosx_15_0_x86_64.whl", hash = "sha256:d9b6f633fd2865af2afba2beb0c1819b6ecd4aed1c8f90f5d1bbca3272306b10"}, + {file = "debugpy-1.8.19-cp38-cp38-manylinux_2_34_x86_64.whl", hash = "sha256:a21bfdea088f713df05fa246ba0520f6ba44dd7eaec224742f51987a6979a648"}, + {file = "debugpy-1.8.19-cp38-cp38-win32.whl", hash = "sha256:b1cb98e5325da3059ca24445fca48314bfddfdf65ce1b59ff07055e723f06bd2"}, + {file = "debugpy-1.8.19-cp38-cp38-win_amd64.whl", hash = "sha256:c9b9bf440141a36836bdbe4320a2b126bb38aafa85e1aed05d7bfbb0e2a278bf"}, + {file = "debugpy-1.8.19-cp39-cp39-macosx_15_0_x86_64.whl", hash = "sha256:c047177ab2d286451f242b855b650d313198c4a987140d4b35218b2855a64a4a"}, + {file = "debugpy-1.8.19-cp39-cp39-manylinux_2_34_x86_64.whl", hash = "sha256:4468de0c30012d367944f0eab4ecb8371736e8ef9522a465f61214f344c11183"}, + {file = "debugpy-1.8.19-cp39-cp39-win32.whl", hash = "sha256:7b62c0f015120ede25e5124a5f9d8a424e1208e3d96a36c89958f046ee21fff6"}, + {file = "debugpy-1.8.19-cp39-cp39-win_amd64.whl", hash = "sha256:76f566baaf7f3e06adbe67ffedccd2ee911d1e486f55931939ce3f0fe1090774"}, + {file = "debugpy-1.8.19-py2.py3-none-any.whl", hash = "sha256:360ffd231a780abbc414ba0f005dad409e71c78637efe8f2bd75837132a41d38"}, + {file = "debugpy-1.8.19.tar.gz", hash = "sha256:eea7e5987445ab0b5ed258093722d5ecb8bb72217c5c9b1e21f64efe23ddebdb"}, ] [[package]] @@ -722,13 +721,13 @@ tests = ["pytest"] [[package]] name = "exceptiongroup" -version = "1.3.0" +version = "1.3.1" description = "Backport of PEP 654 (exception groups)" optional = false python-versions = ">=3.7" files = [ - {file = "exceptiongroup-1.3.0-py3-none-any.whl", hash = "sha256:4d111e6e0c13d0644cad6ddaa7ed0261a0b36971f6d23e7ec9b4b9097da78a10"}, - {file = "exceptiongroup-1.3.0.tar.gz", hash = "sha256:b241f5885f560bc56a59ee63ca4c6a8bfa46ae4ad651af316d4e81817bb9fd88"}, + {file = "exceptiongroup-1.3.1-py3-none-any.whl", hash = "sha256:a7a39a3bd276781e98394987d3a5701d0c4edffb633bb7a5144577f82c773598"}, + {file = "exceptiongroup-1.3.1.tar.gz", hash = "sha256:8b412432c6055b0b7d14c310000ae93352ed6754f70fa8f7c34141f91c4e3219"}, ] [package.dependencies] @@ -862,13 +861,13 @@ all = ["flake8 (>=7.1.1)", "mypy (>=1.11.2)", "pytest (>=8.3.2)", "ruff (>=0.6.2 [[package]] name = "importlib-metadata" -version = "8.7.0" +version = "8.7.1" description = "Read metadata from Python packages" optional = false python-versions = ">=3.9" files = [ - {file = "importlib_metadata-8.7.0-py3-none-any.whl", hash = 
"sha256:e5dd1551894c77868a30651cef00984d50e1002d06942a7101d34870c5f02afd"}, - {file = "importlib_metadata-8.7.0.tar.gz", hash = "sha256:d13b81ad223b890aa16c5471f2ac3056cf76c5f10f82d6f9292f0b415f389000"}, + {file = "importlib_metadata-8.7.1-py3-none-any.whl", hash = "sha256:5a1f80bf1daa489495071efbb095d75a634cf28a8bc299581244063b53176151"}, + {file = "importlib_metadata-8.7.1.tar.gz", hash = "sha256:49fef1ae6440c182052f407c8d34a68f72efc36db9ca90dc0113398f2fdde8bb"}, ] [package.dependencies] @@ -878,10 +877,10 @@ zipp = ">=3.20" check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1)"] cover = ["pytest-cov"] doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] -enabler = ["pytest-enabler (>=2.2)"] +enabler = ["pytest-enabler (>=3.4)"] perf = ["ipython"] -test = ["flufl.flake8", "importlib_resources (>=1.3)", "jaraco.test (>=5.4)", "packaging", "pyfakefs", "pytest (>=6,!=8.1.*)", "pytest-perf (>=0.9.2)"] -type = ["pytest-mypy"] +test = ["flufl.flake8", "jaraco.test (>=5.4)", "packaging", "pyfakefs", "pytest (>=6,!=8.1.*)", "pytest-perf (>=0.9.2)"] +type = ["mypy (<1.19)", "pytest-mypy (>=1.0.1)"] [[package]] name = "iniconfig" @@ -1051,18 +1050,15 @@ i18n = ["Babel (>=2.7)"] [[package]] name = "json5" -version = "0.12.1" +version = "0.13.0" description = "A Python implementation of the JSON5 data format." optional = true python-versions = ">=3.8.0" files = [ - {file = "json5-0.12.1-py3-none-any.whl", hash = "sha256:d9c9b3bc34a5f54d43c35e11ef7cb87d8bdd098c6ace87117a7b7e83e705c1d5"}, - {file = "json5-0.12.1.tar.gz", hash = "sha256:b2743e77b3242f8d03c143dd975a6ec7c52e2f2afe76ed934e53503dd4ad4990"}, + {file = "json5-0.13.0-py3-none-any.whl", hash = "sha256:9a08e1dd65f6a4d4c6fa82d216cf2477349ec2346a38fd70cc11d2557499fbcc"}, + {file = "json5-0.13.0.tar.gz", hash = "sha256:b1edf8d487721c0bf64d83c28e91280781f6e21f4a797d3261c7c828d4c165bf"}, ] -[package.extras] -dev = ["build (==1.2.2.post1)", "coverage (==7.5.4)", "coverage (==7.8.0)", "mypy (==1.14.1)", "mypy (==1.15.0)", "pip (==25.0.1)", "pylint (==3.2.7)", "pylint (==3.3.6)", "ruff (==0.11.2)", "twine (==6.1.0)", "uv (==0.6.11)"] - [[package]] name = "jsonpointer" version = "3.0.0" @@ -1302,13 +1298,13 @@ test = ["jupyter-server (>=2.0.0)", "pytest (>=7.0)", "pytest-jupyter[server] (> [[package]] name = "jupyterlab" -version = "4.5.0" +version = "4.5.2" description = "JupyterLab computational environment" optional = true python-versions = ">=3.9" files = [ - {file = "jupyterlab-4.5.0-py3-none-any.whl", hash = "sha256:88e157c75c1afff64c7dc4b801ec471450b922a4eae4305211ddd40da8201c8a"}, - {file = "jupyterlab-4.5.0.tar.gz", hash = "sha256:aec33d6d8f1225b495ee2cf20f0514f45e6df8e360bdd7ac9bace0b7ac5177ea"}, + {file = "jupyterlab-4.5.2-py3-none-any.whl", hash = "sha256:76466ebcfdb7a9bb7e2fbd6459c0e2c032ccf75be673634a84bee4b3e6b13ab6"}, + {file = "jupyterlab-4.5.2.tar.gz", hash = "sha256:c80a6b9f6dace96a566d590c65ee2785f61e7cd4aac5b4d453dcc7d0d5e069b7"}, ] [package.dependencies] @@ -1581,13 +1577,13 @@ files = [ [[package]] name = "mistune" -version = "3.1.4" +version = "3.2.0" description = "A sane and fast Markdown parser with useful plugins and renderers" optional = true python-versions = ">=3.8" files = [ - {file = "mistune-3.1.4-py3-none-any.whl", hash = "sha256:93691da911e5d9d2e23bc54472892aff676df27a75274962ff9edc210364266d"}, - {file = "mistune-3.1.4.tar.gz", hash = "sha256:b5a7f801d389f724ec702840c11d8fc48f2b33519102fc7ee739e8177b672164"}, + {file = 
"mistune-3.2.0-py3-none-any.whl", hash = "sha256:febdc629a3c78616b94393c6580551e0e34cc289987ec6c35ed3f4be42d0eee1"}, + {file = "mistune-3.2.0.tar.gz", hash = "sha256:708487c8a8cdd99c9d90eb3ed4c3ed961246ff78ac82f03418f5183ab70e398a"}, ] [package.dependencies] @@ -1750,18 +1746,18 @@ files = [ [[package]] name = "notebook" -version = "7.5.0" +version = "7.5.2" description = "Jupyter Notebook - A web-based notebook environment for interactive computing" optional = true python-versions = ">=3.9" files = [ - {file = "notebook-7.5.0-py3-none-any.whl", hash = "sha256:3300262d52905ca271bd50b22617681d95f08a8360d099e097726e6d2efb5811"}, - {file = "notebook-7.5.0.tar.gz", hash = "sha256:3b27eaf9913033c28dde92d02139414c608992e1df4b969c843219acf2ff95e4"}, + {file = "notebook-7.5.2-py3-none-any.whl", hash = "sha256:17d078a98603d70d62b6b4b3fcb67e87d7a68c398a7ae9b447eb2d7d9aec9979"}, + {file = "notebook-7.5.2.tar.gz", hash = "sha256:83e82f93c199ca730313bea1bb24bc279ea96f74816d038a92d26b6b9d5f3e4a"}, ] [package.dependencies] jupyter-server = ">=2.4.0,<3" -jupyterlab = ">=4.5.0rc0,<4.6" +jupyterlab = ">=4.5.2,<4.6" jupyterlab-server = ">=2.28.0,<3" notebook-shim = ">=0.2,<0.3" tornado = ">=6.2.0" @@ -1883,16 +1879,18 @@ testing = ["coverage", "pytest", "pytest-benchmark"] [[package]] name = "prometheus-client" -version = "0.23.1" +version = "0.24.0" description = "Python client for the Prometheus monitoring system." optional = true python-versions = ">=3.9" files = [ - {file = "prometheus_client-0.23.1-py3-none-any.whl", hash = "sha256:dd1913e6e76b59cfe44e7a4b83e01afc9873c1bdfd2ed8739f1e76aeca115f99"}, - {file = "prometheus_client-0.23.1.tar.gz", hash = "sha256:6ae8f9081eaaaf153a2e959d2e6c4f4fb57b12ef76c8c7980202f1e57b48b2ce"}, + {file = "prometheus_client-0.24.0-py3-none-any.whl", hash = "sha256:4ab6d4fb5a1b25ad74b58e6271857e356fff3399473e599d227ab5d0ce6637f0"}, + {file = "prometheus_client-0.24.0.tar.gz", hash = "sha256:726b40c0d499f4904d4b5b7abe8d43e6aff090de0d468ae8f2226290b331c667"}, ] [package.extras] +aiohttp = ["aiohttp"] +django = ["django"] twisted = ["twisted"] [[package]] @@ -1911,35 +1909,37 @@ wcwidth = "*" [[package]] name = "psutil" -version = "7.1.3" +version = "7.2.1" description = "Cross-platform lib for process and system monitoring." 
optional = true python-versions = ">=3.6" files = [ - {file = "psutil-7.1.3-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:0005da714eee687b4b8decd3d6cc7c6db36215c9e74e5ad2264b90c3df7d92dc"}, - {file = "psutil-7.1.3-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:19644c85dcb987e35eeeaefdc3915d059dac7bd1167cdcdbf27e0ce2df0c08c0"}, - {file = "psutil-7.1.3-cp313-cp313t-manylinux2010_x86_64.manylinux_2_12_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:95ef04cf2e5ba0ab9eaafc4a11eaae91b44f4ef5541acd2ee91d9108d00d59a7"}, - {file = "psutil-7.1.3-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1068c303be3a72f8e18e412c5b2a8f6d31750fb152f9cb106b54090296c9d251"}, - {file = "psutil-7.1.3-cp313-cp313t-win_amd64.whl", hash = "sha256:18349c5c24b06ac5612c0428ec2a0331c26443d259e2a0144a9b24b4395b58fa"}, - {file = "psutil-7.1.3-cp313-cp313t-win_arm64.whl", hash = "sha256:c525ffa774fe4496282fb0b1187725793de3e7c6b29e41562733cae9ada151ee"}, - {file = "psutil-7.1.3-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:b403da1df4d6d43973dc004d19cee3b848e998ae3154cc8097d139b77156c353"}, - {file = "psutil-7.1.3-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:ad81425efc5e75da3f39b3e636293360ad8d0b49bed7df824c79764fb4ba9b8b"}, - {file = "psutil-7.1.3-cp314-cp314t-manylinux2010_x86_64.manylinux_2_12_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8f33a3702e167783a9213db10ad29650ebf383946e91bc77f28a5eb083496bc9"}, - {file = "psutil-7.1.3-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:fac9cd332c67f4422504297889da5ab7e05fd11e3c4392140f7370f4208ded1f"}, - {file = "psutil-7.1.3-cp314-cp314t-win_amd64.whl", hash = "sha256:3792983e23b69843aea49c8f5b8f115572c5ab64c153bada5270086a2123c7e7"}, - {file = "psutil-7.1.3-cp314-cp314t-win_arm64.whl", hash = "sha256:31d77fcedb7529f27bb3a0472bea9334349f9a04160e8e6e5020f22c59893264"}, - {file = "psutil-7.1.3-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:2bdbcd0e58ca14996a42adf3621a6244f1bb2e2e528886959c72cf1e326677ab"}, - {file = "psutil-7.1.3-cp36-abi3-macosx_11_0_arm64.whl", hash = "sha256:bc31fa00f1fbc3c3802141eede66f3a2d51d89716a194bf2cd6fc68310a19880"}, - {file = "psutil-7.1.3-cp36-abi3-manylinux2010_x86_64.manylinux_2_12_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:3bb428f9f05c1225a558f53e30ccbad9930b11c3fc206836242de1091d3e7dd3"}, - {file = "psutil-7.1.3-cp36-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:56d974e02ca2c8eb4812c3f76c30e28836fffc311d55d979f1465c1feeb2b68b"}, - {file = "psutil-7.1.3-cp37-abi3-win_amd64.whl", hash = "sha256:f39c2c19fe824b47484b96f9692932248a54c43799a84282cfe58d05a6449efd"}, - {file = "psutil-7.1.3-cp37-abi3-win_arm64.whl", hash = "sha256:bd0d69cee829226a761e92f28140bec9a5ee9d5b4fb4b0cc589068dbfff559b1"}, - {file = "psutil-7.1.3.tar.gz", hash = "sha256:6c86281738d77335af7aec228328e944b30930899ea760ecf33a4dba66be5e74"}, -] - -[package.extras] -dev = ["abi3audit", "black", "check-manifest", "colorama", "coverage", "packaging", "pylint", "pyperf", "pypinfo", "pyreadline", "pytest", "pytest-cov", "pytest-instafail", "pytest-subtests", "pytest-xdist", "pywin32", "requests", "rstcheck", "ruff", "setuptools", "sphinx", "sphinx_rtd_theme", "toml-sort", "twine", "validate-pyproject[all]", "virtualenv", "vulture", "wheel", "wheel", "wmi"] -test = ["pytest", "pytest-instafail", "pytest-subtests", "pytest-xdist", "pywin32", "setuptools", "wheel", "wmi"] + {file = 
"psutil-7.2.1-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:ba9f33bb525b14c3ea563b2fd521a84d2fa214ec59e3e6a2858f78d0844dd60d"}, + {file = "psutil-7.2.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:81442dac7abfc2f4f4385ea9e12ddf5a796721c0f6133260687fec5c3780fa49"}, + {file = "psutil-7.2.1-cp313-cp313t-manylinux2010_x86_64.manylinux_2_12_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ea46c0d060491051d39f0d2cff4f98d5c72b288289f57a21556cc7d504db37fc"}, + {file = "psutil-7.2.1-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:35630d5af80d5d0d49cfc4d64c1c13838baf6717a13effb35869a5919b854cdf"}, + {file = "psutil-7.2.1-cp313-cp313t-win_amd64.whl", hash = "sha256:923f8653416604e356073e6e0bccbe7c09990acef442def2f5640dd0faa9689f"}, + {file = "psutil-7.2.1-cp313-cp313t-win_arm64.whl", hash = "sha256:cfbe6b40ca48019a51827f20d830887b3107a74a79b01ceb8cc8de4ccb17b672"}, + {file = "psutil-7.2.1-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:494c513ccc53225ae23eec7fe6e1482f1b8a44674241b54561f755a898650679"}, + {file = "psutil-7.2.1-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:3fce5f92c22b00cdefd1645aa58ab4877a01679e901555067b1bd77039aa589f"}, + {file = "psutil-7.2.1-cp314-cp314t-manylinux2010_x86_64.manylinux_2_12_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:93f3f7b0bb07711b49626e7940d6fe52aa9940ad86e8f7e74842e73189712129"}, + {file = "psutil-7.2.1-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d34d2ca888208eea2b5c68186841336a7f5e0b990edec929be909353a202768a"}, + {file = "psutil-7.2.1-cp314-cp314t-win_amd64.whl", hash = "sha256:2ceae842a78d1603753561132d5ad1b2f8a7979cb0c283f5b52fb4e6e14b1a79"}, + {file = "psutil-7.2.1-cp314-cp314t-win_arm64.whl", hash = "sha256:08a2f175e48a898c8eb8eace45ce01777f4785bc744c90aa2cc7f2fa5462a266"}, + {file = "psutil-7.2.1-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:b2e953fcfaedcfbc952b44744f22d16575d3aa78eb4f51ae74165b4e96e55f42"}, + {file = "psutil-7.2.1-cp36-abi3-macosx_11_0_arm64.whl", hash = "sha256:05cc68dbb8c174828624062e73078e7e35406f4ca2d0866c272c2410d8ef06d1"}, + {file = "psutil-7.2.1-cp36-abi3-manylinux2010_x86_64.manylinux_2_12_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:5e38404ca2bb30ed7267a46c02f06ff842e92da3bb8c5bfdadbd35a5722314d8"}, + {file = "psutil-7.2.1-cp36-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ab2b98c9fc19f13f59628d94df5cc4cc4844bc572467d113a8b517d634e362c6"}, + {file = "psutil-7.2.1-cp36-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:f78baafb38436d5a128f837fab2d92c276dfb48af01a240b861ae02b2413ada8"}, + {file = "psutil-7.2.1-cp36-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:99a4cd17a5fdd1f3d014396502daa70b5ec21bf4ffe38393e152f8e449757d67"}, + {file = "psutil-7.2.1-cp37-abi3-win_amd64.whl", hash = "sha256:b1b0671619343aa71c20ff9767eced0483e4fc9e1f489d50923738caf6a03c17"}, + {file = "psutil-7.2.1-cp37-abi3-win_arm64.whl", hash = "sha256:0d67c1822c355aa6f7314d92018fb4268a76668a536f133599b91edd48759442"}, + {file = "psutil-7.2.1.tar.gz", hash = "sha256:f7583aec590485b43ca601dd9cea0dcd65bd7bb21d30ef4ddbf4ea6b5ed1bdd3"}, +] + +[package.extras] +dev = ["abi3audit", "black", "check-manifest", "coverage", "packaging", "psleak", "pylint", "pyperf", "pypinfo", "pytest", "pytest-cov", "pytest-instafail", "pytest-xdist", "requests", "rstcheck", "ruff", "setuptools", "sphinx", "sphinx_rtd_theme", "toml-sort", "twine", "validate-pyproject[all]", "virtualenv", 
"vulture", "wheel"] +test = ["psleak", "pytest", "pytest-instafail", "pytest-xdist", "setuptools"] [[package]] name = "ptyprocess" @@ -1979,13 +1979,13 @@ files = [ [[package]] name = "pydantic" -version = "2.12.4" +version = "2.12.5" description = "Data validation using Python type hints" optional = false python-versions = ">=3.9" files = [ - {file = "pydantic-2.12.4-py3-none-any.whl", hash = "sha256:92d3d202a745d46f9be6df459ac5a064fdaa3c1c4cd8adcfa332ccf3c05f871e"}, - {file = "pydantic-2.12.4.tar.gz", hash = "sha256:0f8cb9555000a4b5b617f66bfd2566264c4984b27589d3b845685983e8ea85ac"}, + {file = "pydantic-2.12.5-py3-none-any.whl", hash = "sha256:e561593fccf61e8a20fc46dfc2dfe075b8be7d0188df33f221ad1f0139180f9d"}, + {file = "pydantic-2.12.5.tar.gz", hash = "sha256:4d351024c75c0f085a9febbb665ce8c0c6ec5d30e903bdb6394b7ede26aebb49"}, ] [package.dependencies] @@ -2830,13 +2830,13 @@ files = [ [[package]] name = "send2trash" -version = "1.8.3" +version = "2.0.0" description = "Send file to trash natively under Mac OS X, Windows and Linux" optional = true -python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7" +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,!=3.7.*" files = [ - {file = "Send2Trash-1.8.3-py3-none-any.whl", hash = "sha256:0c31227e0bd08961c7665474a3d1ef7193929fedda4233843689baa056be46c9"}, - {file = "Send2Trash-1.8.3.tar.gz", hash = "sha256:b18e7a3966d99871aefeb00cfbcfdced55ce4871194810fc71f4aa484b953abf"}, + {file = "send2trash-2.0.0-py3-none-any.whl", hash = "sha256:e70d5ce41dbb890882cc78bc25d137478330b39a391e756fadf82e34da4d85b8"}, + {file = "send2trash-2.0.0.tar.gz", hash = "sha256:1761421da3f9930bfe51ed7c45343948573383ad4c27e3acebc91be324e7770d"}, ] [package.extras] @@ -2875,17 +2875,6 @@ files = [ {file = "six-1.17.0.tar.gz", hash = "sha256:ff70335d468e7eb6ec65b95b99d3a2836546063f63acc5171de367e834932a81"}, ] -[[package]] -name = "sniffio" -version = "1.3.1" -description = "Sniff out which async library your code is running under" -optional = false -python-versions = ">=3.7" -files = [ - {file = "sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2"}, - {file = "sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc"}, -] - [[package]] name = "snowballstemmer" version = "3.0.1" @@ -2919,13 +2908,13 @@ numpy = ["NumPy"] [[package]] name = "soupsieve" -version = "2.8" +version = "2.8.1" description = "A modern CSS selector implementation for Beautiful Soup." 
optional = true python-versions = ">=3.9" files = [ - {file = "soupsieve-2.8-py3-none-any.whl", hash = "sha256:0cc76456a30e20f5d7f2e14a98a4ae2ee4e5abdc7c5ea0aafe795f344bc7984c"}, - {file = "soupsieve-2.8.tar.gz", hash = "sha256:e2dd4a40a628cb5f28f6d4b0db8800b8f581b65bb380b97de22ba5ca8d72572f"}, + {file = "soupsieve-2.8.1-py3-none-any.whl", hash = "sha256:a11fe2a6f3d76ab3cf2de04eb339c1be5b506a8a47f2ceb6d139803177f85434"}, + {file = "soupsieve-2.8.1.tar.gz", hash = "sha256:4cf733bc50fa805f5df4b8ef4740fc0e0fa6218cf3006269afd3f9d6d80fd350"}, ] [[package]] @@ -3018,85 +3007,90 @@ files = [ [[package]] name = "tomli" -version = "2.3.0" +version = "2.4.0" description = "A lil' TOML parser" optional = false python-versions = ">=3.8" files = [ - {file = "tomli-2.3.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:88bd15eb972f3664f5ed4b57c1634a97153b4bac4479dcb6a495f41921eb7f45"}, - {file = "tomli-2.3.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:883b1c0d6398a6a9d29b508c331fa56adbcdff647f6ace4dfca0f50e90dfd0ba"}, - {file = "tomli-2.3.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d1381caf13ab9f300e30dd8feadb3de072aeb86f1d34a8569453ff32a7dea4bf"}, - {file = "tomli-2.3.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a0e285d2649b78c0d9027570d4da3425bdb49830a6156121360b3f8511ea3441"}, - {file = "tomli-2.3.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:0a154a9ae14bfcf5d8917a59b51ffd5a3ac1fd149b71b47a3a104ca4edcfa845"}, - {file = "tomli-2.3.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:74bf8464ff93e413514fefd2be591c3b0b23231a77f901db1eb30d6f712fc42c"}, - {file = "tomli-2.3.0-cp311-cp311-win32.whl", hash = "sha256:00b5f5d95bbfc7d12f91ad8c593a1659b6387b43f054104cda404be6bda62456"}, - {file = "tomli-2.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:4dc4ce8483a5d429ab602f111a93a6ab1ed425eae3122032db7e9acf449451be"}, - {file = "tomli-2.3.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:d7d86942e56ded512a594786a5ba0a5e521d02529b3826e7761a05138341a2ac"}, - {file = "tomli-2.3.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:73ee0b47d4dad1c5e996e3cd33b8a76a50167ae5f96a2607cbe8cc773506ab22"}, - {file = "tomli-2.3.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:792262b94d5d0a466afb5bc63c7daa9d75520110971ee269152083270998316f"}, - {file = "tomli-2.3.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4f195fe57ecceac95a66a75ac24d9d5fbc98ef0962e09b2eddec5d39375aae52"}, - {file = "tomli-2.3.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:e31d432427dcbf4d86958c184b9bfd1e96b5b71f8eb17e6d02531f434fd335b8"}, - {file = "tomli-2.3.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:7b0882799624980785240ab732537fcfc372601015c00f7fc367c55308c186f6"}, - {file = "tomli-2.3.0-cp312-cp312-win32.whl", hash = "sha256:ff72b71b5d10d22ecb084d345fc26f42b5143c5533db5e2eaba7d2d335358876"}, - {file = "tomli-2.3.0-cp312-cp312-win_amd64.whl", hash = "sha256:1cb4ed918939151a03f33d4242ccd0aa5f11b3547d0cf30f7c74a408a5b99878"}, - {file = "tomli-2.3.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:5192f562738228945d7b13d4930baffda67b69425a7f0da96d360b0a3888136b"}, - {file = "tomli-2.3.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:be71c93a63d738597996be9528f4abe628d1adf5e6eb11607bc8fe1a510b5dae"}, - {file = 
"tomli-2.3.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c4665508bcbac83a31ff8ab08f424b665200c0e1e645d2bd9ab3d3e557b6185b"}, - {file = "tomli-2.3.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4021923f97266babc6ccab9f5068642a0095faa0a51a246a6a02fccbb3514eaf"}, - {file = "tomli-2.3.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:a4ea38c40145a357d513bffad0ed869f13c1773716cf71ccaa83b0fa0cc4e42f"}, - {file = "tomli-2.3.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:ad805ea85eda330dbad64c7ea7a4556259665bdf9d2672f5dccc740eb9d3ca05"}, - {file = "tomli-2.3.0-cp313-cp313-win32.whl", hash = "sha256:97d5eec30149fd3294270e889b4234023f2c69747e555a27bd708828353ab606"}, - {file = "tomli-2.3.0-cp313-cp313-win_amd64.whl", hash = "sha256:0c95ca56fbe89e065c6ead5b593ee64b84a26fca063b5d71a1122bf26e533999"}, - {file = "tomli-2.3.0-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:cebc6fe843e0733ee827a282aca4999b596241195f43b4cc371d64fc6639da9e"}, - {file = "tomli-2.3.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:4c2ef0244c75aba9355561272009d934953817c49f47d768070c3c94355c2aa3"}, - {file = "tomli-2.3.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c22a8bf253bacc0cf11f35ad9808b6cb75ada2631c2d97c971122583b129afbc"}, - {file = "tomli-2.3.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0eea8cc5c5e9f89c9b90c4896a8deefc74f518db5927d0e0e8d4a80953d774d0"}, - {file = "tomli-2.3.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:b74a0e59ec5d15127acdabd75ea17726ac4c5178ae51b85bfe39c4f8a278e879"}, - {file = "tomli-2.3.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:b5870b50c9db823c595983571d1296a6ff3e1b88f734a4c8f6fc6188397de005"}, - {file = "tomli-2.3.0-cp314-cp314-win32.whl", hash = "sha256:feb0dacc61170ed7ab602d3d972a58f14ee3ee60494292d384649a3dc38ef463"}, - {file = "tomli-2.3.0-cp314-cp314-win_amd64.whl", hash = "sha256:b273fcbd7fc64dc3600c098e39136522650c49bca95df2d11cf3b626422392c8"}, - {file = "tomli-2.3.0-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:940d56ee0410fa17ee1f12b817b37a4d4e4dc4d27340863cc67236c74f582e77"}, - {file = "tomli-2.3.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:f85209946d1fe94416debbb88d00eb92ce9cd5266775424ff81bc959e001acaf"}, - {file = "tomli-2.3.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a56212bdcce682e56b0aaf79e869ba5d15a6163f88d5451cbde388d48b13f530"}, - {file = "tomli-2.3.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c5f3ffd1e098dfc032d4d3af5c0ac64f6d286d98bc148698356847b80fa4de1b"}, - {file = "tomli-2.3.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:5e01decd096b1530d97d5d85cb4dff4af2d8347bd35686654a004f8dea20fc67"}, - {file = "tomli-2.3.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:8a35dd0e643bb2610f156cca8db95d213a90015c11fee76c946aa62b7ae7e02f"}, - {file = "tomli-2.3.0-cp314-cp314t-win32.whl", hash = "sha256:a1f7f282fe248311650081faafa5f4732bdbfef5d45fe3f2e702fbc6f2d496e0"}, - {file = "tomli-2.3.0-cp314-cp314t-win_amd64.whl", hash = "sha256:70a251f8d4ba2d9ac2542eecf008b3c8a9fc5c3f9f02c56a9d7952612be2fdba"}, - {file = "tomli-2.3.0-py3-none-any.whl", hash = "sha256:e95b1af3c5b07d9e643909b5abbec77cd9f1217e6d0bca72b0234736b9fb1f1b"}, - {file = "tomli-2.3.0.tar.gz", hash = 
"sha256:64be704a875d2a59753d80ee8a533c3fe183e3f06807ff7dc2232938ccb01549"}, + {file = "tomli-2.4.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b5ef256a3fd497d4973c11bf142e9ed78b150d36f5773f1ca6088c230ffc5867"}, + {file = "tomli-2.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:5572e41282d5268eb09a697c89a7bee84fae66511f87533a6f88bd2f7b652da9"}, + {file = "tomli-2.4.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:551e321c6ba03b55676970b47cb1b73f14a0a4dce6a3e1a9458fd6d921d72e95"}, + {file = "tomli-2.4.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:5e3f639a7a8f10069d0e15408c0b96a2a828cfdec6fca05296ebcdcc28ca7c76"}, + {file = "tomli-2.4.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:1b168f2731796b045128c45982d3a4874057626da0e2ef1fdd722848b741361d"}, + {file = "tomli-2.4.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:133e93646ec4300d651839d382d63edff11d8978be23da4cc106f5a18b7d0576"}, + {file = "tomli-2.4.0-cp311-cp311-win32.whl", hash = "sha256:b6c78bdf37764092d369722d9946cb65b8767bfa4110f902a1b2542d8d173c8a"}, + {file = "tomli-2.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:d3d1654e11d724760cdb37a3d7691f0be9db5fbdaef59c9f532aabf87006dbaa"}, + {file = "tomli-2.4.0-cp311-cp311-win_arm64.whl", hash = "sha256:cae9c19ed12d4e8f3ebf46d1a75090e4c0dc16271c5bce1c833ac168f08fb614"}, + {file = "tomli-2.4.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:920b1de295e72887bafa3ad9f7a792f811847d57ea6b1215154030cf131f16b1"}, + {file = "tomli-2.4.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:7d6d9a4aee98fac3eab4952ad1d73aee87359452d1c086b5ceb43ed02ddb16b8"}, + {file = "tomli-2.4.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:36b9d05b51e65b254ea6c2585b59d2c4cb91c8a3d91d0ed0f17591a29aaea54a"}, + {file = "tomli-2.4.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1c8a885b370751837c029ef9bc014f27d80840e48bac415f3412e6593bbc18c1"}, + {file = "tomli-2.4.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:8768715ffc41f0008abe25d808c20c3d990f42b6e2e58305d5da280ae7d1fa3b"}, + {file = "tomli-2.4.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:7b438885858efd5be02a9a133caf5812b8776ee0c969fea02c45e8e3f296ba51"}, + {file = "tomli-2.4.0-cp312-cp312-win32.whl", hash = "sha256:0408e3de5ec77cc7f81960c362543cbbd91ef883e3138e81b729fc3eea5b9729"}, + {file = "tomli-2.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:685306e2cc7da35be4ee914fd34ab801a6acacb061b6a7abca922aaf9ad368da"}, + {file = "tomli-2.4.0-cp312-cp312-win_arm64.whl", hash = "sha256:5aa48d7c2356055feef06a43611fc401a07337d5b006be13a30f6c58f869e3c3"}, + {file = "tomli-2.4.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:84d081fbc252d1b6a982e1870660e7330fb8f90f676f6e78b052ad4e64714bf0"}, + {file = "tomli-2.4.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:9a08144fa4cba33db5255f9b74f0b89888622109bd2776148f2597447f92a94e"}, + {file = "tomli-2.4.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c73add4bb52a206fd0c0723432db123c0c75c280cbd67174dd9d2db228ebb1b4"}, + {file = "tomli-2.4.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1fb2945cbe303b1419e2706e711b7113da57b7db31ee378d08712d678a34e51e"}, + {file = "tomli-2.4.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = 
"sha256:bbb1b10aa643d973366dc2cb1ad94f99c1726a02343d43cbc011edbfac579e7c"}, + {file = "tomli-2.4.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4cbcb367d44a1f0c2be408758b43e1ffb5308abe0ea222897d6bfc8e8281ef2f"}, + {file = "tomli-2.4.0-cp313-cp313-win32.whl", hash = "sha256:7d49c66a7d5e56ac959cb6fc583aff0651094ec071ba9ad43df785abc2320d86"}, + {file = "tomli-2.4.0-cp313-cp313-win_amd64.whl", hash = "sha256:3cf226acb51d8f1c394c1b310e0e0e61fecdd7adcb78d01e294ac297dd2e7f87"}, + {file = "tomli-2.4.0-cp313-cp313-win_arm64.whl", hash = "sha256:d20b797a5c1ad80c516e41bc1fb0443ddb5006e9aaa7bda2d71978346aeb9132"}, + {file = "tomli-2.4.0-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:26ab906a1eb794cd4e103691daa23d95c6919cc2fa9160000ac02370cc9dd3f6"}, + {file = "tomli-2.4.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:20cedb4ee43278bc4f2fee6cb50daec836959aadaf948db5172e776dd3d993fc"}, + {file = "tomli-2.4.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:39b0b5d1b6dd03684b3fb276407ebed7090bbec989fa55838c98560c01113b66"}, + {file = "tomli-2.4.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a26d7ff68dfdb9f87a016ecfd1e1c2bacbe3108f4e0f8bcd2228ef9a766c787d"}, + {file = "tomli-2.4.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:20ffd184fb1df76a66e34bd1b36b4a4641bd2b82954befa32fe8163e79f1a702"}, + {file = "tomli-2.4.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:75c2f8bbddf170e8effc98f5e9084a8751f8174ea6ccf4fca5398436e0320bc8"}, + {file = "tomli-2.4.0-cp314-cp314-win32.whl", hash = "sha256:31d556d079d72db7c584c0627ff3a24c5d3fb4f730221d3444f3efb1b2514776"}, + {file = "tomli-2.4.0-cp314-cp314-win_amd64.whl", hash = "sha256:43e685b9b2341681907759cf3a04e14d7104b3580f808cfde1dfdb60ada85475"}, + {file = "tomli-2.4.0-cp314-cp314-win_arm64.whl", hash = "sha256:3d895d56bd3f82ddd6faaff993c275efc2ff38e52322ea264122d72729dca2b2"}, + {file = "tomli-2.4.0-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:5b5807f3999fb66776dbce568cc9a828544244a8eb84b84b9bafc080c99597b9"}, + {file = "tomli-2.4.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:c084ad935abe686bd9c898e62a02a19abfc9760b5a79bc29644463eaf2840cb0"}, + {file = "tomli-2.4.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0f2e3955efea4d1cfbcb87bc321e00dc08d2bcb737fd1d5e398af111d86db5df"}, + {file = "tomli-2.4.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0e0fe8a0b8312acf3a88077a0802565cb09ee34107813bba1c7cd591fa6cfc8d"}, + {file = "tomli-2.4.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:413540dce94673591859c4c6f794dfeaa845e98bf35d72ed59636f869ef9f86f"}, + {file = "tomli-2.4.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:0dc56fef0e2c1c470aeac5b6ca8cc7b640bb93e92d9803ddaf9ea03e198f5b0b"}, + {file = "tomli-2.4.0-cp314-cp314t-win32.whl", hash = "sha256:d878f2a6707cc9d53a1be1414bbb419e629c3d6e67f69230217bb663e76b5087"}, + {file = "tomli-2.4.0-cp314-cp314t-win_amd64.whl", hash = "sha256:2add28aacc7425117ff6364fe9e06a183bb0251b03f986df0e78e974047571fd"}, + {file = "tomli-2.4.0-cp314-cp314t-win_arm64.whl", hash = "sha256:2b1e3b80e1d5e52e40e9b924ec43d81570f0e7d09d11081b797bc4692765a3d4"}, + {file = "tomli-2.4.0-py3-none-any.whl", hash = "sha256:1f776e7d669ebceb01dee46484485f43a4048746235e683bcdffacdf1fb4785a"}, + {file = "tomli-2.4.0.tar.gz", hash = 
"sha256:aa89c3f6c277dd275d8e243ad24f3b5e701491a860d5121f2cdd399fbb31fc9c"}, ] [[package]] name = "tomlkit" -version = "0.13.3" +version = "0.14.0" description = "Style preserving TOML library" optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" files = [ - {file = "tomlkit-0.13.3-py3-none-any.whl", hash = "sha256:c89c649d79ee40629a9fda55f8ace8c6a1b42deb912b2a8fd8d942ddadb606b0"}, - {file = "tomlkit-0.13.3.tar.gz", hash = "sha256:430cf247ee57df2b94ee3fbe588e71d362a941ebb545dec29b53961d61add2a1"}, + {file = "tomlkit-0.14.0-py3-none-any.whl", hash = "sha256:592064ed85b40fa213469f81ac584f67a4f2992509a7c3ea2d632208623a3680"}, + {file = "tomlkit-0.14.0.tar.gz", hash = "sha256:cf00efca415dbd57575befb1f6634c4f42d2d87dbba376128adb42c121b87064"}, ] [[package]] name = "tornado" -version = "6.5.2" +version = "6.5.4" description = "Tornado is a Python web framework and asynchronous networking library, originally developed at FriendFeed." optional = false python-versions = ">=3.9" files = [ - {file = "tornado-6.5.2-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:2436822940d37cde62771cff8774f4f00b3c8024fe482e16ca8387b8a2724db6"}, - {file = "tornado-6.5.2-cp39-abi3-macosx_10_9_x86_64.whl", hash = "sha256:583a52c7aa94ee046854ba81d9ebb6c81ec0fd30386d96f7640c96dad45a03ef"}, - {file = "tornado-6.5.2-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b0fe179f28d597deab2842b86ed4060deec7388f1fd9c1b4a41adf8af058907e"}, - {file = "tornado-6.5.2-cp39-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b186e85d1e3536d69583d2298423744740986018e393d0321df7340e71898882"}, - {file = "tornado-6.5.2-cp39-abi3-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e792706668c87709709c18b353da1f7662317b563ff69f00bab83595940c7108"}, - {file = "tornado-6.5.2-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:06ceb1300fd70cb20e43b1ad8aaee0266e69e7ced38fa910ad2e03285009ce7c"}, - {file = "tornado-6.5.2-cp39-abi3-musllinux_1_2_i686.whl", hash = "sha256:74db443e0f5251be86cbf37929f84d8c20c27a355dd452a5cfa2aada0d001ec4"}, - {file = "tornado-6.5.2-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:b5e735ab2889d7ed33b32a459cac490eda71a1ba6857b0118de476ab6c366c04"}, - {file = "tornado-6.5.2-cp39-abi3-win32.whl", hash = "sha256:c6f29e94d9b37a95013bb669616352ddb82e3bfe8326fccee50583caebc8a5f0"}, - {file = "tornado-6.5.2-cp39-abi3-win_amd64.whl", hash = "sha256:e56a5af51cc30dd2cae649429af65ca2f6571da29504a07995175df14c18f35f"}, - {file = "tornado-6.5.2-cp39-abi3-win_arm64.whl", hash = "sha256:d6c33dc3672e3a1f3618eb63b7ef4683a7688e7b9e6e8f0d9aa5726360a004af"}, - {file = "tornado-6.5.2.tar.gz", hash = "sha256:ab53c8f9a0fa351e2c0741284e06c7a45da86afb544133201c5cc8578eb076a0"}, + {file = "tornado-6.5.4-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:d6241c1a16b1c9e4cc28148b1cda97dd1c6cb4fb7068ac1bedc610768dff0ba9"}, + {file = "tornado-6.5.4-cp39-abi3-macosx_10_9_x86_64.whl", hash = "sha256:2d50f63dda1d2cac3ae1fa23d254e16b5e38153758470e9956cbc3d813d40843"}, + {file = "tornado-6.5.4-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d1cf66105dc6acb5af613c054955b8137e34a03698aa53272dbda4afe252be17"}, + {file = "tornado-6.5.4-cp39-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:50ff0a58b0dc97939d29da29cd624da010e7f804746621c78d14b80238669335"}, + {file = 
"tornado-6.5.4-cp39-abi3-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e5fb5e04efa54cf0baabdd10061eb4148e0be137166146fff835745f59ab9f7f"}, + {file = "tornado-6.5.4-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:9c86b1643b33a4cd415f8d0fe53045f913bf07b4a3ef646b735a6a86047dda84"}, + {file = "tornado-6.5.4-cp39-abi3-musllinux_1_2_i686.whl", hash = "sha256:6eb82872335a53dd063a4f10917b3efd28270b56a33db69009606a0312660a6f"}, + {file = "tornado-6.5.4-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:6076d5dda368c9328ff41ab5d9dd3608e695e8225d1cd0fd1e006f05da3635a8"}, + {file = "tornado-6.5.4-cp39-abi3-win32.whl", hash = "sha256:1768110f2411d5cd281bac0a090f707223ce77fd110424361092859e089b38d1"}, + {file = "tornado-6.5.4-cp39-abi3-win_amd64.whl", hash = "sha256:fa07d31e0cd85c60713f2b995da613588aa03e1303d75705dca6af8babc18ddc"}, + {file = "tornado-6.5.4-cp39-abi3-win_arm64.whl", hash = "sha256:053e6e16701eb6cbe641f308f4c1a9541f91b6261991160391bfc342e8a551a1"}, + {file = "tornado-6.5.4.tar.gz", hash = "sha256:a22fa9047405d03260b483980635f0b041989d8bcc9a313f8fe18b411d84b1d7"}, ] [[package]] @@ -3138,13 +3132,13 @@ files = [ [[package]] name = "types-requests" -version = "2.32.4.20250913" +version = "2.32.4.20260107" description = "Typing stubs for requests" optional = false python-versions = ">=3.9" files = [ - {file = "types_requests-2.32.4.20250913-py3-none-any.whl", hash = "sha256:78c9c1fffebbe0fa487a418e0fa5252017e9c60d1a2da394077f1780f655d7e1"}, - {file = "types_requests-2.32.4.20250913.tar.gz", hash = "sha256:abd6d4f9ce3a9383f269775a9835a4c24e5cd6b9f647d64f88aa4613c33def5d"}, + {file = "types_requests-2.32.4.20260107-py3-none-any.whl", hash = "sha256:b703fe72f8ce5b31ef031264fe9395cac8f46a04661a79f7ed31a80fb308730d"}, + {file = "types_requests-2.32.4.20260107.tar.gz", hash = "sha256:018a11ac158f801bfa84857ddec1650750e393df8a004a8a9ae2a9bec6fcb24f"}, ] [package.dependencies] @@ -3177,13 +3171,13 @@ typing-extensions = ">=4.12.0" [[package]] name = "tzdata" -version = "2025.2" +version = "2025.3" description = "Provider of IANA time zone data" optional = true python-versions = ">=2" files = [ - {file = "tzdata-2025.2-py2.py3-none-any.whl", hash = "sha256:1a403fada01ff9221ca8044d701868fa132215d84beb92242d9acd2147f667a8"}, - {file = "tzdata-2025.2.tar.gz", hash = "sha256:b60a638fcc0daffadf82fe0f57e53d06bdec2f36c4df66280ae79bce6bd6f2b9"}, + {file = "tzdata-2025.3-py2.py3-none-any.whl", hash = "sha256:06a47e5700f3081aab02b2e513160914ff0694bce9947d6b76ebd6bf57cfc5d1"}, + {file = "tzdata-2025.3.tar.gz", hash = "sha256:de39c2ca5dc7b0344f2eba86f49d614019d29f060fc4ebc8a417896a620b56a7"}, ] [[package]] @@ -3202,20 +3196,20 @@ dev = ["flake8", "flake8-annotations", "flake8-bandit", "flake8-bugbear", "flake [[package]] name = "urllib3" -version = "2.5.0" +version = "2.6.3" description = "HTTP library with thread-safe connection pooling, file post, and more." 
optional = false python-versions = ">=3.9" files = [ - {file = "urllib3-2.5.0-py3-none-any.whl", hash = "sha256:e6b01673c0fa6a13e374b50871808eb3bf7046c4b125b216f6bf1cc604cff0dc"}, - {file = "urllib3-2.5.0.tar.gz", hash = "sha256:3fc47733c7e419d4bc3f6b3dc2b4f890bb743906a30d56ba4a5bfa4bbff92760"}, + {file = "urllib3-2.6.3-py3-none-any.whl", hash = "sha256:bf272323e553dfb2e87d9bfd225ca7b0f467b919d7bbd355436d3fd37cb0acd4"}, + {file = "urllib3-2.6.3.tar.gz", hash = "sha256:1b62b6884944a57dbe321509ab94fd4d3b307075e0c2eae991ac71ee15ad38ed"}, ] [package.extras] -brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)"] +brotli = ["brotli (>=1.2.0)", "brotlicffi (>=1.2.0.0)"] h2 = ["h2 (>=4,<5)"] socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] -zstd = ["zstandard (>=0.18.0)"] +zstd = ["backports-zstd (>=1.0.0)"] [[package]] name = "wcwidth" @@ -3268,97 +3262,80 @@ test = ["pytest", "websockets"] [[package]] name = "websockets" -version = "13.1" +version = "15.0.1" description = "An implementation of the WebSocket Protocol (RFC 6455 & 7692)" optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" files = [ - {file = "websockets-13.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:f48c749857f8fb598fb890a75f540e3221d0976ed0bf879cf3c7eef34151acee"}, - {file = "websockets-13.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c7e72ce6bda6fb9409cc1e8164dd41d7c91466fb599eb047cfda72fe758a34a7"}, - {file = "websockets-13.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f779498eeec470295a2b1a5d97aa1bc9814ecd25e1eb637bd9d1c73a327387f6"}, - {file = "websockets-13.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4676df3fe46956fbb0437d8800cd5f2b6d41143b6e7e842e60554398432cf29b"}, - {file = "websockets-13.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a7affedeb43a70351bb811dadf49493c9cfd1ed94c9c70095fd177e9cc1541fa"}, - {file = "websockets-13.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1971e62d2caa443e57588e1d82d15f663b29ff9dfe7446d9964a4b6f12c1e700"}, - {file = "websockets-13.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:5f2e75431f8dc4a47f31565a6e1355fb4f2ecaa99d6b89737527ea917066e26c"}, - {file = "websockets-13.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:58cf7e75dbf7e566088b07e36ea2e3e2bd5676e22216e4cad108d4df4a7402a0"}, - {file = "websockets-13.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:c90d6dec6be2c7d03378a574de87af9b1efea77d0c52a8301dd831ece938452f"}, - {file = "websockets-13.1-cp310-cp310-win32.whl", hash = "sha256:730f42125ccb14602f455155084f978bd9e8e57e89b569b4d7f0f0c17a448ffe"}, - {file = "websockets-13.1-cp310-cp310-win_amd64.whl", hash = "sha256:5993260f483d05a9737073be197371940c01b257cc45ae3f1d5d7adb371b266a"}, - {file = "websockets-13.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:61fc0dfcda609cda0fc9fe7977694c0c59cf9d749fbb17f4e9483929e3c48a19"}, - {file = "websockets-13.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ceec59f59d092c5007e815def4ebb80c2de330e9588e101cf8bd94c143ec78a5"}, - {file = "websockets-13.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c1dca61c6db1166c48b95198c0b7d9c990b30c756fc2923cc66f68d17dc558fd"}, - {file = "websockets-13.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:308e20f22c2c77f3f39caca508e765f8725020b84aa963474e18c59accbf4c02"}, - {file = 
"websockets-13.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:62d516c325e6540e8a57b94abefc3459d7dab8ce52ac75c96cad5549e187e3a7"}, - {file = "websockets-13.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:87c6e35319b46b99e168eb98472d6c7d8634ee37750d7693656dc766395df096"}, - {file = "websockets-13.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:5f9fee94ebafbc3117c30be1844ed01a3b177bb6e39088bc6b2fa1dc15572084"}, - {file = "websockets-13.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:7c1e90228c2f5cdde263253fa5db63e6653f1c00e7ec64108065a0b9713fa1b3"}, - {file = "websockets-13.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:6548f29b0e401eea2b967b2fdc1c7c7b5ebb3eeb470ed23a54cd45ef078a0db9"}, - {file = "websockets-13.1-cp311-cp311-win32.whl", hash = "sha256:c11d4d16e133f6df8916cc5b7e3e96ee4c44c936717d684a94f48f82edb7c92f"}, - {file = "websockets-13.1-cp311-cp311-win_amd64.whl", hash = "sha256:d04f13a1d75cb2b8382bdc16ae6fa58c97337253826dfe136195b7f89f661557"}, - {file = "websockets-13.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:9d75baf00138f80b48f1eac72ad1535aac0b6461265a0bcad391fc5aba875cfc"}, - {file = "websockets-13.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:9b6f347deb3dcfbfde1c20baa21c2ac0751afaa73e64e5b693bb2b848efeaa49"}, - {file = "websockets-13.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:de58647e3f9c42f13f90ac7e5f58900c80a39019848c5547bc691693098ae1bd"}, - {file = "websockets-13.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a1b54689e38d1279a51d11e3467dd2f3a50f5f2e879012ce8f2d6943f00e83f0"}, - {file = "websockets-13.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cf1781ef73c073e6b0f90af841aaf98501f975d306bbf6221683dd594ccc52b6"}, - {file = "websockets-13.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8d23b88b9388ed85c6faf0e74d8dec4f4d3baf3ecf20a65a47b836d56260d4b9"}, - {file = "websockets-13.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:3c78383585f47ccb0fcf186dcb8a43f5438bd7d8f47d69e0b56f71bf431a0a68"}, - {file = "websockets-13.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:d6d300f8ec35c24025ceb9b9019ae9040c1ab2f01cddc2bcc0b518af31c75c14"}, - {file = "websockets-13.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:a9dcaf8b0cc72a392760bb8755922c03e17a5a54e08cca58e8b74f6902b433cf"}, - {file = "websockets-13.1-cp312-cp312-win32.whl", hash = "sha256:2f85cf4f2a1ba8f602298a853cec8526c2ca42a9a4b947ec236eaedb8f2dc80c"}, - {file = "websockets-13.1-cp312-cp312-win_amd64.whl", hash = "sha256:38377f8b0cdeee97c552d20cf1865695fcd56aba155ad1b4ca8779a5b6ef4ac3"}, - {file = "websockets-13.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:a9ab1e71d3d2e54a0aa646ab6d4eebfaa5f416fe78dfe4da2839525dc5d765c6"}, - {file = "websockets-13.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:b9d7439d7fab4dce00570bb906875734df13d9faa4b48e261c440a5fec6d9708"}, - {file = "websockets-13.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:327b74e915cf13c5931334c61e1a41040e365d380f812513a255aa804b183418"}, - {file = "websockets-13.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:325b1ccdbf5e5725fdcb1b0e9ad4d2545056479d0eee392c291c1bf76206435a"}, - {file = 
"websockets-13.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:346bee67a65f189e0e33f520f253d5147ab76ae42493804319b5716e46dddf0f"}, - {file = "websockets-13.1-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:91a0fa841646320ec0d3accdff5b757b06e2e5c86ba32af2e0815c96c7a603c5"}, - {file = "websockets-13.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:18503d2c5f3943e93819238bf20df71982d193f73dcecd26c94514f417f6b135"}, - {file = "websockets-13.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:a9cd1af7e18e5221d2878378fbc287a14cd527fdd5939ed56a18df8a31136bb2"}, - {file = "websockets-13.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:70c5be9f416aa72aab7a2a76c90ae0a4fe2755c1816c153c1a2bcc3333ce4ce6"}, - {file = "websockets-13.1-cp313-cp313-win32.whl", hash = "sha256:624459daabeb310d3815b276c1adef475b3e6804abaf2d9d2c061c319f7f187d"}, - {file = "websockets-13.1-cp313-cp313-win_amd64.whl", hash = "sha256:c518e84bb59c2baae725accd355c8dc517b4a3ed8db88b4bc93c78dae2974bf2"}, - {file = "websockets-13.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:c7934fd0e920e70468e676fe7f1b7261c1efa0d6c037c6722278ca0228ad9d0d"}, - {file = "websockets-13.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:149e622dc48c10ccc3d2760e5f36753db9cacf3ad7bc7bbbfd7d9c819e286f23"}, - {file = "websockets-13.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:a569eb1b05d72f9bce2ebd28a1ce2054311b66677fcd46cf36204ad23acead8c"}, - {file = "websockets-13.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:95df24ca1e1bd93bbca51d94dd049a984609687cb2fb08a7f2c56ac84e9816ea"}, - {file = "websockets-13.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d8dbb1bf0c0a4ae8b40bdc9be7f644e2f3fb4e8a9aca7145bfa510d4a374eeb7"}, - {file = "websockets-13.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:035233b7531fb92a76beefcbf479504db8c72eb3bff41da55aecce3a0f729e54"}, - {file = "websockets-13.1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:e4450fc83a3df53dec45922b576e91e94f5578d06436871dce3a6be38e40f5db"}, - {file = "websockets-13.1-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:463e1c6ec853202dd3657f156123d6b4dad0c546ea2e2e38be2b3f7c5b8e7295"}, - {file = "websockets-13.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:6d6855bbe70119872c05107e38fbc7f96b1d8cb047d95c2c50869a46c65a8e96"}, - {file = "websockets-13.1-cp38-cp38-win32.whl", hash = "sha256:204e5107f43095012b00f1451374693267adbb832d29966a01ecc4ce1db26faf"}, - {file = "websockets-13.1-cp38-cp38-win_amd64.whl", hash = "sha256:485307243237328c022bc908b90e4457d0daa8b5cf4b3723fd3c4a8012fce4c6"}, - {file = "websockets-13.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:9b37c184f8b976f0c0a231a5f3d6efe10807d41ccbe4488df8c74174805eea7d"}, - {file = "websockets-13.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:163e7277e1a0bd9fb3c8842a71661ad19c6aa7bb3d6678dc7f89b17fbcc4aeb7"}, - {file = "websockets-13.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4b889dbd1342820cc210ba44307cf75ae5f2f96226c0038094455a96e64fb07a"}, - {file = "websockets-13.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:586a356928692c1fed0eca68b4d1c2cbbd1ca2acf2ac7e7ebd3b9052582deefa"}, - {file = 
"websockets-13.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7bd6abf1e070a6b72bfeb71049d6ad286852e285f146682bf30d0296f5fbadfa"}, - {file = "websockets-13.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6d2aad13a200e5934f5a6767492fb07151e1de1d6079c003ab31e1823733ae79"}, - {file = "websockets-13.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:df01aea34b6e9e33572c35cd16bae5a47785e7d5c8cb2b54b2acdb9678315a17"}, - {file = "websockets-13.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:e54affdeb21026329fb0744ad187cf812f7d3c2aa702a5edb562b325191fcab6"}, - {file = "websockets-13.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:9ef8aa8bdbac47f4968a5d66462a2a0935d044bf35c0e5a8af152d58516dbeb5"}, - {file = "websockets-13.1-cp39-cp39-win32.whl", hash = "sha256:deeb929efe52bed518f6eb2ddc00cc496366a14c726005726ad62c2dd9017a3c"}, - {file = "websockets-13.1-cp39-cp39-win_amd64.whl", hash = "sha256:7c65ffa900e7cc958cd088b9a9157a8141c991f8c53d11087e6fb7277a03f81d"}, - {file = "websockets-13.1-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:5dd6da9bec02735931fccec99d97c29f47cc61f644264eb995ad6c0c27667238"}, - {file = "websockets-13.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:2510c09d8e8df777177ee3d40cd35450dc169a81e747455cc4197e63f7e7bfe5"}, - {file = "websockets-13.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f1c3cf67185543730888b20682fb186fc8d0fa6f07ccc3ef4390831ab4b388d9"}, - {file = "websockets-13.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bcc03c8b72267e97b49149e4863d57c2d77f13fae12066622dc78fe322490fe6"}, - {file = "websockets-13.1-pp310-pypy310_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:004280a140f220c812e65f36944a9ca92d766b6cc4560be652a0a3883a79ed8a"}, - {file = "websockets-13.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:e2620453c075abeb0daa949a292e19f56de518988e079c36478bacf9546ced23"}, - {file = "websockets-13.1-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:9156c45750b37337f7b0b00e6248991a047be4aa44554c9886fe6bdd605aab3b"}, - {file = "websockets-13.1-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:80c421e07973a89fbdd93e6f2003c17d20b69010458d3a8e37fb47874bd67d51"}, - {file = "websockets-13.1-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:82d0ba76371769d6a4e56f7e83bb8e81846d17a6190971e38b5de108bde9b0d7"}, - {file = "websockets-13.1-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e9875a0143f07d74dc5e1ded1c4581f0d9f7ab86c78994e2ed9e95050073c94d"}, - {file = "websockets-13.1-pp38-pypy38_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a11e38ad8922c7961447f35c7b17bffa15de4d17c70abd07bfbe12d6faa3e027"}, - {file = "websockets-13.1-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:4059f790b6ae8768471cddb65d3c4fe4792b0ab48e154c9f0a04cefaabcd5978"}, - {file = "websockets-13.1-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:25c35bf84bf7c7369d247f0b8cfa157f989862c49104c5cf85cb5436a641d93e"}, - {file = "websockets-13.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:83f91d8a9bb404b8c2c41a707ac7f7f75b9442a0a876df295de27251a856ad09"}, - {file = 
"websockets-13.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7a43cfdcddd07f4ca2b1afb459824dd3c6d53a51410636a2c7fc97b9a8cf4842"}, - {file = "websockets-13.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:48a2ef1381632a2f0cb4efeff34efa97901c9fbc118e01951ad7cfc10601a9bb"}, - {file = "websockets-13.1-pp39-pypy39_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:459bf774c754c35dbb487360b12c5727adab887f1622b8aed5755880a21c4a20"}, - {file = "websockets-13.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:95858ca14a9f6fa8413d29e0a585b31b278388aa775b8a81fa24830123874678"}, - {file = "websockets-13.1-py3-none-any.whl", hash = "sha256:a9a396a6ad26130cdae92ae10c36af09d9bfe6cafe69670fd3b6da9b07b4044f"}, - {file = "websockets-13.1.tar.gz", hash = "sha256:a3b3366087c1bc0a2795111edcadddb8b3b59509d5db5d7ea3fdd69f954a8878"}, + {file = "websockets-15.0.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:d63efaa0cd96cf0c5fe4d581521d9fa87744540d4bc999ae6e08595a1014b45b"}, + {file = "websockets-15.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ac60e3b188ec7574cb761b08d50fcedf9d77f1530352db4eef1707fe9dee7205"}, + {file = "websockets-15.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:5756779642579d902eed757b21b0164cd6fe338506a8083eb58af5c372e39d9a"}, + {file = "websockets-15.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0fdfe3e2a29e4db3659dbd5bbf04560cea53dd9610273917799f1cde46aa725e"}, + {file = "websockets-15.0.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4c2529b320eb9e35af0fa3016c187dffb84a3ecc572bcee7c3ce302bfeba52bf"}, + {file = "websockets-15.0.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ac1e5c9054fe23226fb11e05a6e630837f074174c4c2f0fe442996112a6de4fb"}, + {file = "websockets-15.0.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:5df592cd503496351d6dc14f7cdad49f268d8e618f80dce0cd5a36b93c3fc08d"}, + {file = "websockets-15.0.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:0a34631031a8f05657e8e90903e656959234f3a04552259458aac0b0f9ae6fd9"}, + {file = "websockets-15.0.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:3d00075aa65772e7ce9e990cab3ff1de702aa09be3940d1dc88d5abf1ab8a09c"}, + {file = "websockets-15.0.1-cp310-cp310-win32.whl", hash = "sha256:1234d4ef35db82f5446dca8e35a7da7964d02c127b095e172e54397fb6a6c256"}, + {file = "websockets-15.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:39c1fec2c11dc8d89bba6b2bf1556af381611a173ac2b511cf7231622058af41"}, + {file = "websockets-15.0.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:823c248b690b2fd9303ba00c4f66cd5e2d8c3ba4aa968b2779be9532a4dad431"}, + {file = "websockets-15.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:678999709e68425ae2593acf2e3ebcbcf2e69885a5ee78f9eb80e6e371f1bf57"}, + {file = "websockets-15.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d50fd1ee42388dcfb2b3676132c78116490976f1300da28eb629272d5d93e905"}, + {file = "websockets-15.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d99e5546bf73dbad5bf3547174cd6cb8ba7273062a23808ffea025ecb1cf8562"}, + {file = "websockets-15.0.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:66dd88c918e3287efc22409d426c8f729688d89a0c587c88971a0faa2c2f3792"}, + {file = "websockets-15.0.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8dd8327c795b3e3f219760fa603dcae1dcc148172290a8ab15158cf85a953413"}, + {file = "websockets-15.0.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8fdc51055e6ff4adeb88d58a11042ec9a5eae317a0a53d12c062c8a8865909e8"}, + {file = "websockets-15.0.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:693f0192126df6c2327cce3baa7c06f2a117575e32ab2308f7f8216c29d9e2e3"}, + {file = "websockets-15.0.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:54479983bd5fb469c38f2f5c7e3a24f9a4e70594cd68cd1fa6b9340dadaff7cf"}, + {file = "websockets-15.0.1-cp311-cp311-win32.whl", hash = "sha256:16b6c1b3e57799b9d38427dda63edcbe4926352c47cf88588c0be4ace18dac85"}, + {file = "websockets-15.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:27ccee0071a0e75d22cb35849b1db43f2ecd3e161041ac1ee9d2352ddf72f065"}, + {file = "websockets-15.0.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:3e90baa811a5d73f3ca0bcbf32064d663ed81318ab225ee4f427ad4e26e5aff3"}, + {file = "websockets-15.0.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:592f1a9fe869c778694f0aa806ba0374e97648ab57936f092fd9d87f8bc03665"}, + {file = "websockets-15.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:0701bc3cfcb9164d04a14b149fd74be7347a530ad3bbf15ab2c678a2cd3dd9a2"}, + {file = "websockets-15.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e8b56bdcdb4505c8078cb6c7157d9811a85790f2f2b3632c7d1462ab5783d215"}, + {file = "websockets-15.0.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0af68c55afbd5f07986df82831c7bff04846928ea8d1fd7f30052638788bc9b5"}, + {file = "websockets-15.0.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:64dee438fed052b52e4f98f76c5790513235efaa1ef7f3f2192c392cd7c91b65"}, + {file = "websockets-15.0.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:d5f6b181bb38171a8ad1d6aa58a67a6aa9d4b38d0f8c5f496b9e42561dfc62fe"}, + {file = "websockets-15.0.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:5d54b09eba2bada6011aea5375542a157637b91029687eb4fdb2dab11059c1b4"}, + {file = "websockets-15.0.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:3be571a8b5afed347da347bfcf27ba12b069d9d7f42cb8c7028b5e98bbb12597"}, + {file = "websockets-15.0.1-cp312-cp312-win32.whl", hash = "sha256:c338ffa0520bdb12fbc527265235639fb76e7bc7faafbb93f6ba80d9c06578a9"}, + {file = "websockets-15.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:fcd5cf9e305d7b8338754470cf69cf81f420459dbae8a3b40cee57417f4614a7"}, + {file = "websockets-15.0.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:ee443ef070bb3b6ed74514f5efaa37a252af57c90eb33b956d35c8e9c10a1931"}, + {file = "websockets-15.0.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:5a939de6b7b4e18ca683218320fc67ea886038265fd1ed30173f5ce3f8e85675"}, + {file = "websockets-15.0.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:746ee8dba912cd6fc889a8147168991d50ed70447bf18bcda7039f7d2e3d9151"}, + {file = "websockets-15.0.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:595b6c3969023ecf9041b2936ac3827e4623bfa3ccf007575f04c5a6aa318c22"}, + {file = "websockets-15.0.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:3c714d2fc58b5ca3e285461a4cc0c9a66bd0e24c5da9911e30158286c9b5be7f"}, + {file = "websockets-15.0.1-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0f3c1e2ab208db911594ae5b4f79addeb3501604a165019dd221c0bdcabe4db8"}, + {file = "websockets-15.0.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:229cf1d3ca6c1804400b0a9790dc66528e08a6a1feec0d5040e8b9eb14422375"}, + {file = "websockets-15.0.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:756c56e867a90fb00177d530dca4b097dd753cde348448a1012ed6c5131f8b7d"}, + {file = "websockets-15.0.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:558d023b3df0bffe50a04e710bc87742de35060580a293c2a984299ed83bc4e4"}, + {file = "websockets-15.0.1-cp313-cp313-win32.whl", hash = "sha256:ba9e56e8ceeeedb2e080147ba85ffcd5cd0711b89576b83784d8605a7df455fa"}, + {file = "websockets-15.0.1-cp313-cp313-win_amd64.whl", hash = "sha256:e09473f095a819042ecb2ab9465aee615bd9c2028e4ef7d933600a8401c79561"}, + {file = "websockets-15.0.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:5f4c04ead5aed67c8a1a20491d54cdfba5884507a48dd798ecaf13c74c4489f5"}, + {file = "websockets-15.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:abdc0c6c8c648b4805c5eacd131910d2a7f6455dfd3becab248ef108e89ab16a"}, + {file = "websockets-15.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a625e06551975f4b7ea7102bc43895b90742746797e2e14b70ed61c43a90f09b"}, + {file = "websockets-15.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d591f8de75824cbb7acad4e05d2d710484f15f29d4a915092675ad3456f11770"}, + {file = "websockets-15.0.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:47819cea040f31d670cc8d324bb6435c6f133b8c7a19ec3d61634e62f8d8f9eb"}, + {file = "websockets-15.0.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ac017dd64572e5c3bd01939121e4d16cf30e5d7e110a119399cf3133b63ad054"}, + {file = "websockets-15.0.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:4a9fac8e469d04ce6c25bb2610dc535235bd4aa14996b4e6dbebf5e007eba5ee"}, + {file = "websockets-15.0.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:363c6f671b761efcb30608d24925a382497c12c506b51661883c3e22337265ed"}, + {file = "websockets-15.0.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:2034693ad3097d5355bfdacfffcbd3ef5694f9718ab7f29c29689a9eae841880"}, + {file = "websockets-15.0.1-cp39-cp39-win32.whl", hash = "sha256:3b1ac0d3e594bf121308112697cf4b32be538fb1444468fb0a6ae4feebc83411"}, + {file = "websockets-15.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:b7643a03db5c95c799b89b31c036d5f27eeb4d259c798e878d6937d71832b1e4"}, + {file = "websockets-15.0.1-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:0c9e74d766f2818bb95f84c25be4dea09841ac0f734d1966f415e4edfc4ef1c3"}, + {file = "websockets-15.0.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:1009ee0c7739c08a0cd59de430d6de452a55e42d6b522de7aa15e6f67db0b8e1"}, + {file = "websockets-15.0.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:76d1f20b1c7a2fa82367e04982e708723ba0e7b8d43aa643d3dcd404d74f1475"}, + {file = "websockets-15.0.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f29d80eb9a9263b8d109135351caf568cc3f80b9928bccde535c235de55c22d9"}, + {file = 
"websockets-15.0.1-pp310-pypy310_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b359ed09954d7c18bbc1680f380c7301f92c60bf924171629c5db97febb12f04"}, + {file = "websockets-15.0.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:cad21560da69f4ce7658ca2cb83138fb4cf695a2ba3e475e0559e05991aa8122"}, + {file = "websockets-15.0.1-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:7f493881579c90fc262d9cdbaa05a6b54b3811c2f300766748db79f098db9940"}, + {file = "websockets-15.0.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:47b099e1f4fbc95b701b6e85768e1fcdaf1630f3cbe4765fa216596f12310e2e"}, + {file = "websockets-15.0.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:67f2b6de947f8c757db2db9c71527933ad0019737ec374a8a6be9a956786aaf9"}, + {file = "websockets-15.0.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d08eb4c2b7d6c41da6ca0600c077e93f5adcfd979cd777d747e9ee624556da4b"}, + {file = "websockets-15.0.1-pp39-pypy39_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4b826973a4a2ae47ba357e4e82fa44a463b8f168e1ca775ac64521442b19e87f"}, + {file = "websockets-15.0.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:21c1fa28a6a7e3cbdc171c694398b6df4744613ce9b36b1a498e816787e28123"}, + {file = "websockets-15.0.1-py3-none-any.whl", hash = "sha256:f7a866fbc1e97b5c617ee4116daaa09b722101d4a3c170c787450ba409f9736f"}, + {file = "websockets-15.0.1.tar.gz", hash = "sha256:82544de02076bafba038ce055ee6412d68da13ab47f0c60cab827346de828dee"}, ] [[package]] @@ -3488,4 +3465,4 @@ microphone = ["sounddevice"] [metadata] lock-version = "2.0" python-versions = ">=3.9,<4" -content-hash = "dc6a0b8b9b35b2ea915847c943e865f7eda885795ef884b449346c477a19d83f" +content-hash = "0ec4eef60479d73e9ed22b1b6576d7e8745363c006bbe425285f6cd802f47e3a" diff --git a/pyproject.toml b/pyproject.toml index 675b724b..2573b145 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ dynamic = ["version"] [tool.poetry] name = "hume" -version = "0.13.6" +version = "0.13.7" description = "A Python SDK for Hume AI" readme = "README.md" authors = [] @@ -69,7 +69,7 @@ pydantic = ">= 1.9.2" pydantic-core = ">=2.18.2" sounddevice = { version = "^0.4.6", optional = true} typing_extensions = ">= 4.0.0" -websockets = "^13.1" +websockets = ">=12.0" [tool.poetry.group.dev.dependencies] mypy = "==1.13.0" diff --git a/reference.md b/reference.md index 595ceb89..96f146d3 100644 --- a/reference.md +++ b/reference.md @@ -1,6 +1,6 @@ # Reference ## Tts -
client.tts.synthesize_json(...) -> AsyncHttpResponse[ReturnTts] +
client.tts.synthesize_json(...) -> AsyncHttpResponse[ReturnTts]
@@ -174,7 +174,7 @@ For a comparison of Octave versions, see the [Octave versions](/docs/text-to-spe
-
client.tts.synthesize_file(...) -> typing.AsyncIterator[AsyncHttpResponse[typing.AsyncIterator[bytes]]] +
client.tts.synthesize_file(...) -> typing.AsyncIterator[AsyncHttpResponse[typing.AsyncIterator[bytes]]]
@@ -343,7 +343,7 @@ For a comparison of Octave versions, see the [Octave versions](/docs/text-to-spe
-
client.tts.synthesize_file_streaming(...) -> typing.AsyncIterator[AsyncHttpResponse[typing.AsyncIterator[bytes]]] +
client.tts.synthesize_file_streaming(...) -> typing.AsyncIterator[AsyncHttpResponse[typing.AsyncIterator[bytes]]]
@@ -508,7 +508,7 @@ For a comparison of Octave versions, see the [Octave versions](/docs/text-to-spe
-
client.tts.synthesize_json_streaming(...) -> typing.AsyncIterator[AsyncHttpResponse[typing.AsyncIterator[TtsOutput]]] +
client.tts.synthesize_json_streaming(...) -> typing.AsyncIterator[AsyncHttpResponse[typing.AsyncIterator[TtsOutput]]]
@@ -677,7 +677,7 @@ For a comparison of Octave versions, see the [Octave versions](/docs/text-to-spe
-
client.tts.convert_voice_json(...) -> typing.AsyncIterator[AsyncHttpResponse[typing.AsyncIterator[TtsOutput]]] +
client.tts.convert_voice_json(...) -> typing.AsyncIterator[AsyncHttpResponse[typing.AsyncIterator[TtsOutput]]]
@@ -776,7 +776,7 @@ typing.Optional[core.File]` — See core.File for more documentation
## Tts Voices -
client.tts.voices.list(...) -> AsyncPager[ReturnVoice, ReturnPagedVoices] +
client.tts.voices.list(...) -> AsyncPager[ReturnVoice, ReturnPagedVoices]
@@ -896,7 +896,7 @@ For example, if `page_size` is set to 10, each page will include up to 10 items.
-
client.tts.voices.create(...) -> AsyncHttpResponse[ReturnVoice] +
client.tts.voices.create(...) -> AsyncHttpResponse[ReturnVoice]
@@ -977,7 +977,7 @@ client.tts.voices.create(
-
client.tts.voices.delete(...) -> AsyncHttpResponse[None] +
client.tts.voices.delete(...) -> AsyncHttpResponse[None]
@@ -1048,7 +1048,7 @@ client.tts.voices.delete(
## EmpathicVoice ControlPlane -
client.empathic_voice.control_plane.send(...) -> AsyncHttpResponse[None] +
client.empathic_voice.control_plane.send(...) -> AsyncHttpResponse[None]
@@ -1129,7 +1129,7 @@ client.empathic_voice.control_plane.send(
## EmpathicVoice ChatGroups -
client.empathic_voice.chat_groups.list_chat_groups(...) -> AsyncPager[ReturnChatGroup, ReturnPagedChatGroups] +
client.empathic_voice.chat_groups.list_chat_groups(...) -> AsyncPager[ReturnChatGroup, ReturnPagedChatGroups]
@@ -1225,7 +1225,7 @@ For example, if `page_size` is set to 10, each page will include up to 10 items.
-
client.empathic_voice.chat_groups.get_chat_group(...) -> AsyncHttpResponse[ReturnChatGroupPagedChats] +
client.empathic_voice.chat_groups.get_chat_group(...) -> AsyncHttpResponse[ReturnChatGroupPagedChats]
@@ -1324,7 +1324,7 @@ This parameter uses zero-based indexing. For example, setting `page_number` to 0
-
client.empathic_voice.chat_groups.get_audio(...) -> AsyncHttpResponse[ReturnChatGroupPagedAudioReconstructions] +
client.empathic_voice.chat_groups.get_audio(...) -> AsyncHttpResponse[ReturnChatGroupPagedAudioReconstructions]
@@ -1415,7 +1415,7 @@ For example, if `page_size` is set to 10, each page will include up to 10 items.
-
client.empathic_voice.chat_groups.list_chat_group_events(...) -> AsyncPager[ReturnChatEvent, ReturnChatGroupPagedEvents] +
client.empathic_voice.chat_groups.list_chat_group_events(...) -> AsyncPager[ReturnChatEvent, ReturnChatGroupPagedEvents]
@@ -1512,7 +1512,7 @@ This parameter uses zero-based indexing. For example, setting `page_number` to 0
## EmpathicVoice Chats -
client.empathic_voice.chats.list_chats(...) -> AsyncPager[ReturnChat, ReturnPagedChats] +
client.empathic_voice.chats.list_chats(...) -> AsyncPager[ReturnChat, ReturnPagedChats]
@@ -1615,7 +1615,7 @@ For example, if `page_size` is set to 10, each page will include up to 10 items.
-
client.empathic_voice.chats.list_chat_events(...) -> AsyncPager[ReturnChatEvent, ReturnChatPagedEvents] +
client.empathic_voice.chats.list_chat_events(...) -> AsyncPager[ReturnChatEvent, ReturnChatPagedEvents]
@@ -1711,7 +1711,7 @@ This parameter uses zero-based indexing. For example, setting `page_number` to 0
-
client.empathic_voice.chats.get_audio(...) -> AsyncHttpResponse[ReturnChatAudioReconstruction] +
client.empathic_voice.chats.get_audio(...) -> AsyncHttpResponse[ReturnChatAudioReconstruction]
@@ -1768,7 +1768,7 @@ client.empathic_voice.chats.get_audio(
## EmpathicVoice Configs -
client.empathic_voice.configs.list_configs(...) -> AsyncPager[ReturnConfig, ReturnPagedConfigs] +
client.empathic_voice.configs.list_configs(...) -> AsyncPager[ReturnConfig, ReturnPagedConfigs]
@@ -1862,7 +1862,7 @@ For example, if `page_size` is set to 10, each page will include up to 10 items.
-
client.empathic_voice.configs.create_config(...) -> AsyncHttpResponse[ReturnConfig] +
client.empathic_voice.configs.create_config(...) -> AsyncHttpResponse[ReturnConfig]
@@ -2049,7 +2049,7 @@ client.empathic_voice.configs.create_config(
-
client.empathic_voice.configs.list_config_versions(...) -> AsyncPager[ReturnConfig, ReturnPagedConfigs] +
client.empathic_voice.configs.list_config_versions(...) -> AsyncPager[ReturnConfig, ReturnPagedConfigs]
@@ -2142,7 +2142,7 @@ For example, if `page_size` is set to 10, each page will include up to 10 items.
-
client.empathic_voice.configs.create_config_version(...) -> AsyncHttpResponse[ReturnConfig] +
client.empathic_voice.configs.create_config_version(...) -> AsyncHttpResponse[ReturnConfig]
@@ -2334,7 +2334,7 @@ client.empathic_voice.configs.create_config_version(
-
client.empathic_voice.configs.delete_config(...) -> AsyncHttpResponse[None] +
client.empathic_voice.configs.delete_config(...) -> AsyncHttpResponse[None]
@@ -2390,7 +2390,7 @@ client.empathic_voice.configs.delete_config(
-
client.empathic_voice.configs.update_config_name(...) -> AsyncHttpResponse[str] +
client.empathic_voice.configs.update_config_name(...) -> AsyncHttpResponse[str]
@@ -2455,7 +2455,7 @@ client.empathic_voice.configs.update_config_name(
-
client.empathic_voice.configs.get_config_version(...) -> AsyncHttpResponse[ReturnConfig] +
client.empathic_voice.configs.get_config_version(...) -> AsyncHttpResponse[ReturnConfig]
@@ -2520,7 +2520,7 @@ client.empathic_voice.configs.get_config_version(
-
client.empathic_voice.configs.delete_config_version(...) -> AsyncHttpResponse[None] +
client.empathic_voice.configs.delete_config_version(...) -> AsyncHttpResponse[None]
@@ -2585,7 +2585,7 @@ client.empathic_voice.configs.delete_config_version(
-
client.empathic_voice.configs.update_config_description(...) -> AsyncHttpResponse[ReturnConfig] +
client.empathic_voice.configs.update_config_description(...) -> AsyncHttpResponse[ReturnConfig]
@@ -2660,7 +2660,7 @@ client.empathic_voice.configs.update_config_description(
## EmpathicVoice Prompts -
client.empathic_voice.prompts.list_prompts(...) -> AsyncPager[typing.Optional[ReturnPrompt], ReturnPagedPrompts] +
client.empathic_voice.prompts.list_prompts(...) -> AsyncPager[typing.Optional[ReturnPrompt], ReturnPagedPrompts]
@@ -2754,7 +2754,7 @@ For example, if `page_size` is set to 10, each page will include up to 10 items.
-
client.empathic_voice.prompts.create_prompt(...) -> AsyncHttpResponse[typing.Optional[ReturnPrompt]] +
client.empathic_voice.prompts.create_prompt(...) -> AsyncHttpResponse[typing.Optional[ReturnPrompt]]
@@ -2827,7 +2827,7 @@ client.empathic_voice.prompts.create_prompt(
-
client.empathic_voice.prompts.list_prompt_versions(...) -> AsyncHttpResponse[ReturnPagedPrompts] +
client.empathic_voice.prompts.list_prompt_versions(...) -> AsyncHttpResponse[ReturnPagedPrompts]
@@ -2915,7 +2915,7 @@ For example, if `page_size` is set to 10, each page will include up to 10 items.
-
client.empathic_voice.prompts.create_prompt_version(...) -> AsyncHttpResponse[typing.Optional[ReturnPrompt]] +
client.empathic_voice.prompts.create_prompt_version(...) -> AsyncHttpResponse[typing.Optional[ReturnPrompt]]
@@ -2989,7 +2989,7 @@ client.empathic_voice.prompts.create_prompt_version(
-
client.empathic_voice.prompts.delete_prompt(...) -> AsyncHttpResponse[None] +
client.empathic_voice.prompts.delete_prompt(...) -> AsyncHttpResponse[None]
@@ -3045,7 +3045,7 @@ client.empathic_voice.prompts.delete_prompt(
-
client.empathic_voice.prompts.update_prompt_name(...) -> AsyncHttpResponse[str] +
client.empathic_voice.prompts.update_prompt_name(...) -> AsyncHttpResponse[str]
@@ -3110,7 +3110,7 @@ client.empathic_voice.prompts.update_prompt_name(
-
client.empathic_voice.prompts.get_prompt_version(...) -> AsyncHttpResponse[typing.Optional[ReturnPrompt]] +
client.empathic_voice.prompts.get_prompt_version(...) -> AsyncHttpResponse[typing.Optional[ReturnPrompt]]
@@ -3175,7 +3175,7 @@ client.empathic_voice.prompts.get_prompt_version(
-
client.empathic_voice.prompts.delete_prompt_version(...) -> AsyncHttpResponse[None] +
client.empathic_voice.prompts.delete_prompt_version(...) -> AsyncHttpResponse[None]
@@ -3240,7 +3240,7 @@ client.empathic_voice.prompts.delete_prompt_version(
-
client.empathic_voice.prompts.update_prompt_description(...) -> AsyncHttpResponse[typing.Optional[ReturnPrompt]] +
client.empathic_voice.prompts.update_prompt_description(...) -> AsyncHttpResponse[typing.Optional[ReturnPrompt]]
@@ -3315,7 +3315,7 @@ client.empathic_voice.prompts.update_prompt_description(
## EmpathicVoice Tools -
client.empathic_voice.tools.list_tools(...) -> AsyncPager[typing.Optional[ReturnUserDefinedTool], ReturnPagedUserDefinedTools] +
client.empathic_voice.tools.list_tools(...) -> AsyncPager[typing.Optional[ReturnUserDefinedTool], ReturnPagedUserDefinedTools]
@@ -3409,7 +3409,7 @@ For example, if `page_size` is set to 10, each page will include up to 10 items.
-
client.empathic_voice.tools.create_tool(...) -> AsyncHttpResponse[typing.Optional[ReturnUserDefinedTool]] +
client.empathic_voice.tools.create_tool(...) -> AsyncHttpResponse[typing.Optional[ReturnUserDefinedTool]]
@@ -3501,7 +3501,7 @@ client.empathic_voice.tools.create_tool(
-
client.empathic_voice.tools.list_tool_versions(...) -> AsyncPager[typing.Optional[ReturnUserDefinedTool], ReturnPagedUserDefinedTools] +
client.empathic_voice.tools.list_tool_versions(...) -> AsyncPager[typing.Optional[ReturnUserDefinedTool], ReturnPagedUserDefinedTools]
@@ -3594,7 +3594,7 @@ For example, if `page_size` is set to 10, each page will include up to 10 items.
-
client.empathic_voice.tools.create_tool_version(...) -> AsyncHttpResponse[typing.Optional[ReturnUserDefinedTool]] +
client.empathic_voice.tools.create_tool_version(...) -> AsyncHttpResponse[typing.Optional[ReturnUserDefinedTool]]
@@ -3686,7 +3686,7 @@ client.empathic_voice.tools.create_tool_version(
-
client.empathic_voice.tools.delete_tool(...) -> AsyncHttpResponse[None] +
client.empathic_voice.tools.delete_tool(...) -> AsyncHttpResponse[None]
@@ -3742,7 +3742,7 @@ client.empathic_voice.tools.delete_tool(
-
client.empathic_voice.tools.update_tool_name(...) -> AsyncHttpResponse[str] +
client.empathic_voice.tools.update_tool_name(...) -> AsyncHttpResponse[str]
@@ -3807,7 +3807,7 @@ client.empathic_voice.tools.update_tool_name(
-
client.empathic_voice.tools.get_tool_version(...) -> AsyncHttpResponse[typing.Optional[ReturnUserDefinedTool]] +
client.empathic_voice.tools.get_tool_version(...) -> AsyncHttpResponse[typing.Optional[ReturnUserDefinedTool]]
@@ -3872,7 +3872,7 @@ client.empathic_voice.tools.get_tool_version(
-
client.empathic_voice.tools.delete_tool_version(...) -> AsyncHttpResponse[None] +
client.empathic_voice.tools.delete_tool_version(...) -> AsyncHttpResponse[None]
@@ -3937,7 +3937,7 @@ client.empathic_voice.tools.delete_tool_version(
-
client.empathic_voice.tools.update_tool_description(...) -> AsyncHttpResponse[typing.Optional[ReturnUserDefinedTool]] +
client.empathic_voice.tools.update_tool_description(...) -> AsyncHttpResponse[typing.Optional[ReturnUserDefinedTool]]
@@ -4012,7 +4012,7 @@ client.empathic_voice.tools.update_tool_description(
## ExpressionMeasurement Batch -
client.expression_measurement.batch.list_jobs(...) -> AsyncHttpResponse[typing.List[UnionJob]] +
client.expression_measurement.batch.list_jobs(...) -> AsyncHttpResponse[typing.List[UnionJob]]
@@ -4148,7 +4148,7 @@ Specify the order in which to sort the jobs. Defaults to descending order.
-
client.expression_measurement.batch.start_inference_job(...) -> AsyncHttpResponse[str] +
client.expression_measurement.batch.start_inference_job(...) -> AsyncHttpResponse[str]
@@ -4267,7 +4267,7 @@ If you wish to supply more than 100 URLs, consider providing them as an archive
-
client.expression_measurement.batch.get_job_details(...) -> AsyncHttpResponse[UnionJob] +
client.expression_measurement.batch.get_job_details(...) -> AsyncHttpResponse[UnionJob]
@@ -4337,7 +4337,7 @@ client.expression_measurement.batch.get_job_details(
-
client.expression_measurement.batch.get_job_predictions(...) -> AsyncHttpResponse[typing.List[UnionPredictResult]] +
client.expression_measurement.batch.get_job_predictions(...) -> AsyncHttpResponse[typing.List[UnionPredictResult]]
@@ -4407,7 +4407,7 @@ client.expression_measurement.batch.get_job_predictions(
-
client.expression_measurement.batch.start_inference_job_from_local_file(...) -> AsyncHttpResponse[str] +
client.expression_measurement.batch.start_inference_job_from_local_file(...) -> AsyncHttpResponse[str]
diff --git a/src/hume/core/client_wrapper.py b/src/hume/core/client_wrapper.py index 75595bc0..c23a9dbb 100644 --- a/src/hume/core/client_wrapper.py +++ b/src/hume/core/client_wrapper.py @@ -23,10 +23,10 @@ def __init__( def get_headers(self) -> typing.Dict[str, str]: headers: typing.Dict[str, str] = { - "User-Agent": "hume/0.13.6", + "User-Agent": "hume/0.13.7", "X-Fern-Language": "Python", "X-Fern-SDK-Name": "hume", - "X-Fern-SDK-Version": "0.13.6", + "X-Fern-SDK-Version": "0.13.7", **(self.get_custom_headers() or {}), } if self.api_key is not None: diff --git a/src/hume/empathic_voice/chat/client.py.diff b/src/hume/empathic_voice/chat/client.py.diff deleted file mode 100644 index aa685fdd..00000000 --- a/src/hume/empathic_voice/chat/client.py.diff +++ /dev/null @@ -1,87 +0,0 @@ -diff --git a/src/hume/empathic_voice/chat/client.py b/src/hume/empathic_voice/chat/client.py -index 43a9cf28..8ec7e4cd 100644 ---- a/src/hume/empathic_voice/chat/client.py -+++ b/src/hume/empathic_voice/chat/client.py -@@ -10,7 +10,6 @@ from ...core.api_error import ApiError - from ...core.client_wrapper import AsyncClientWrapper, SyncClientWrapper - from ...core.request_options import RequestOptions - from ...core.serialization import convert_and_respect_annotation_metadata --from ...core.query_encoder import single_query_encoder - from ..types.connect_session_settings import ConnectSessionSettings - from .raw_client import AsyncRawChatClient, RawChatClient - from .socket_client import AsyncChatSocketClient, ChatSocketClient -@@ -48,7 +47,7 @@ class ChatClient: - resumed_chat_group_id: typing.Optional[str] = None, - verbose_transcription: typing.Optional[bool] = None, - api_key: typing.Optional[str] = None, -- session_settings: typing.Optional[ConnectSessionSettings] = None, -+ session_settings: ConnectSessionSettings, - request_options: typing.Optional[RequestOptions] = None, - ) -> typing.Iterator[ChatSocketClient]: - """ -@@ -130,10 +129,18 @@ class ChatClient: - query_params = query_params.add("verbose_transcription", verbose_transcription) - if api_key is not None: - query_params = query_params.add("api_key", api_key) -- if session_settings is not None: -- flattened_params = single_query_encoder("session_settings", session_settings) -- for param_key, param_value in flattened_params: -- query_params = query_params.add(param_key, param_value) -+ if ( -+ convert_and_respect_annotation_metadata( -+ object_=session_settings, annotation=ConnectSessionSettings, direction="write" -+ ) -+ is not None -+ ): -+ query_params = query_params.add( -+ "session_settings", -+ convert_and_respect_annotation_metadata( -+ object_=session_settings, annotation=ConnectSessionSettings, direction="write" -+ ), -+ ) - ws_url = ws_url + f"?{query_params}" - headers = self._raw_client._client_wrapper.get_headers() - if request_options and "additional_headers" in request_options: -@@ -183,7 +190,7 @@ class AsyncChatClient: - resumed_chat_group_id: typing.Optional[str] = None, - verbose_transcription: typing.Optional[bool] = None, - api_key: typing.Optional[str] = None, -- session_settings: typing.Optional[ConnectSessionSettings] = None, -+ session_settings: ConnectSessionSettings, - request_options: typing.Optional[RequestOptions] = None, - ) -> typing.AsyncIterator[AsyncChatSocketClient]: - """ -@@ -238,7 +245,7 @@ class AsyncChatClient: - - api_key : typing.Optional[str] - -- session_settings : typing.Optional[ConnectSessionSettings] -+ session_settings : ConnectSessionSettings - - request_options : typing.Optional[RequestOptions] - 
Request-specific configuration. -@@ -265,12 +272,18 @@ class AsyncChatClient: - query_params = query_params.add("verbose_transcription", verbose_transcription) - if api_key is not None: - query_params = query_params.add("api_key", api_key) -- -- if session_settings is not None: -- flattened_params = single_query_encoder("session_settings", session_settings) -- for param_key, param_value in flattened_params: -- query_params = query_params.add(param_key, param_value) -- -+ if ( -+ convert_and_respect_annotation_metadata( -+ object_=session_settings, annotation=ConnectSessionSettings, direction="write" -+ ) -+ is not None -+ ): -+ query_params = query_params.add( -+ "session_settings", -+ convert_and_respect_annotation_metadata( -+ object_=session_settings, annotation=ConnectSessionSettings, direction="write" -+ ), -+ ) - ws_url = ws_url + f"?{query_params}" - headers = self._raw_client._client_wrapper.get_headers() - if request_options and "additional_headers" in request_options: diff --git a/src/hume/empathic_voice/chat/raw_client.py.diff b/src/hume/empathic_voice/chat/raw_client.py.diff deleted file mode 100644 index 14716f92..00000000 --- a/src/hume/empathic_voice/chat/raw_client.py.diff +++ /dev/null @@ -1,176 +0,0 @@ -diff --git a/src/hume/empathic_voice/chat/raw_client.py b/src/hume/empathic_voice/chat/raw_client.py -index fefee870..23e2b4a0 100644 ---- a/src/hume/empathic_voice/chat/raw_client.py -+++ b/src/hume/empathic_voice/chat/raw_client.py -@@ -10,7 +10,6 @@ from ...core.api_error import ApiError - from ...core.client_wrapper import AsyncClientWrapper, SyncClientWrapper - from ...core.request_options import RequestOptions - from ...core.serialization import convert_and_respect_annotation_metadata --from ...core.query_encoder import single_query_encoder - from ..types.connect_session_settings import ConnectSessionSettings - from .socket_client import AsyncChatSocketClient, ChatSocketClient - -@@ -29,6 +28,7 @@ class RawChatClient: - self, - *, - access_token: typing.Optional[str] = None, -+ allow_connection: typing.Optional[bool] = None, - config_id: typing.Optional[str] = None, - config_version: typing.Optional[int] = None, - event_limit: typing.Optional[int] = None, -@@ -50,12 +50,15 @@ class RawChatClient: - - For more details, refer to the [Authentication Strategies Guide](/docs/introduction/api-key#authentication-strategies). - -+ allow_connection : typing.Optional[bool] -+ Allows external connections to this chat via the /connect endpoint. -+ - config_id : typing.Optional[str] - The unique identifier for an EVI configuration. - -- Include this ID in your connection request to equip EVI with the Prompt, Language Model, Voice, and Tools associated with the specified configuration. If omitted, EVI will apply [default configuration settings](/docs/speech-to-speech-evi/configuration/build-a-configuration#default-configuration). -+ Include this ID in your connection request to equip EVI with the Prompt, Language Model, Voice, and Tools associated with the specified configuration. If omitted, EVI will apply [default configuration settings](/docs/empathic-voice-interface-evi/configuration#default-configuration). - -- For help obtaining this ID, see our [Configuration Guide](/docs/speech-to-speech-evi/configuration). -+ For help obtaining this ID, see our [Configuration Guide](/docs/empathic-voice-interface-evi/configuration). - - config_version : typing.Optional[int] - The version number of the EVI configuration specified by the `config_id`. 
-@@ -76,19 +79,16 @@ class RawChatClient: - - There are three ways to obtain the Chat Group ID: - -- - [Chat Metadata](/reference/speech-to-speech-evi/chat#receive.ChatMetadata): Upon establishing a WebSocket connection with EVI, the user receives a Chat Metadata message. This message contains a `chat_group_id`, which can be used to resume conversations within this chat group in future sessions. -+ - [Chat Metadata](/reference/empathic-voice-interface-evi/chat/chat#receive.Chat%20Metadata.type): Upon establishing a WebSocket connection with EVI, the user receives a Chat Metadata message. This message contains a `chat_group_id`, which can be used to resume conversations within this chat group in future sessions. - -- - [List Chats endpoint](/reference/speech-to-speech-evi/chats/list-chats): Use the GET `/v0/evi/chats` endpoint to obtain the Chat Group ID of individual Chat sessions. This endpoint lists all available Chat sessions and their associated Chat Group ID. -+ - [List Chats endpoint](/reference/empathic-voice-interface-evi/chats/list-chats): Use the GET `/v0/evi/chats` endpoint to obtain the Chat Group ID of individual Chat sessions. This endpoint lists all available Chat sessions and their associated Chat Group ID. - -- - [List Chat Groups endpoint](/reference/speech-to-speech-evi/chat-groups/list-chat-groups): Use the GET `/v0/evi/chat_groups` endpoint to obtain the Chat Group IDs of all Chat Groups associated with an API key. This endpoint returns a list of all available chat groups. -+ - [List Chat Groups endpoint](/reference/empathic-voice-interface-evi/chat-groups/list-chat-groups): Use the GET `/v0/evi/chat_groups` endpoint to obtain the Chat Group IDs of all Chat Groups associated with an API key. This endpoint returns a list of all available chat groups. - - verbose_transcription : typing.Optional[bool] -- A flag to enable verbose transcription. Set this query parameter to `true` to have unfinalized user transcripts be sent to the client as interim UserMessage messages. The [interim](/reference/speech-to-speech-evi/chat#receive.UserMessage.interim) field on a [UserMessage](/reference/speech-to-speech-evi/chat#receive.UserMessage) denotes whether the message is "interim" or "final." -+ A flag to enable verbose transcription. Set this query parameter to `"true"` to have unfinalized user transcripts be sent to the client as interim `UserMessage` messages. - - api_key : typing.Optional[str] -- API key used for authenticating the client. If not provided, an `access_token` must be provided to authenticate. -- -- For more details, refer to the [Authentication Strategies Guide](/docs/introduction/api-key#authentication-strategies). 
- - session_settings : ConnectSessionSettings - -@@ -103,6 +103,8 @@ class RawChatClient: - query_params = httpx.QueryParams() - if access_token is not None: - query_params = query_params.add("access_token", access_token) -+ if allow_connection is not None: -+ query_params = query_params.add("allow_connection", allow_connection) - if config_id is not None: - query_params = query_params.add("config_id", config_id) - if config_version is not None: -@@ -115,10 +117,18 @@ class RawChatClient: - query_params = query_params.add("verbose_transcription", verbose_transcription) - if api_key is not None: - query_params = query_params.add("api_key", api_key) -- if session_settings is not None: -- flattened_params = single_query_encoder("session_settings", session_settings) -- for param_key, param_value in flattened_params: -- query_params = query_params.add(param_key, str(param_value)) -+ if ( -+ convert_and_respect_annotation_metadata( -+ object_=session_settings, annotation=ConnectSessionSettings, direction="write" -+ ) -+ is not None -+ ): -+ query_params = query_params.add( -+ "session_settings", -+ convert_and_respect_annotation_metadata( -+ object_=session_settings, annotation=ConnectSessionSettings, direction="write" -+ ), -+ ) - ws_url = ws_url + f"?{query_params}" - headers = self._client_wrapper.get_headers() - if request_options and "additional_headers" in request_options: -@@ -150,6 +160,7 @@ class AsyncRawChatClient: - self, - *, - access_token: typing.Optional[str] = None, -+ allow_connection: typing.Optional[bool] = None, - config_id: typing.Optional[str] = None, - config_version: typing.Optional[int] = None, - event_limit: typing.Optional[int] = None, -@@ -171,12 +182,15 @@ class AsyncRawChatClient: - - For more details, refer to the [Authentication Strategies Guide](/docs/introduction/api-key#authentication-strategies). - -+ allow_connection : typing.Optional[bool] -+ Allows external connections to this chat via the /connect endpoint. -+ - config_id : typing.Optional[str] - The unique identifier for an EVI configuration. - -- Include this ID in your connection request to equip EVI with the Prompt, Language Model, Voice, and Tools associated with the specified configuration. If omitted, EVI will apply [default configuration settings](/docs/speech-to-speech-evi/configuration/build-a-configuration#default-configuration). -+ Include this ID in your connection request to equip EVI with the Prompt, Language Model, Voice, and Tools associated with the specified configuration. If omitted, EVI will apply [default configuration settings](/docs/empathic-voice-interface-evi/configuration#default-configuration). - -- For help obtaining this ID, see our [Configuration Guide](/docs/speech-to-speech-evi/configuration). -+ For help obtaining this ID, see our [Configuration Guide](/docs/empathic-voice-interface-evi/configuration). - - config_version : typing.Optional[int] - The version number of the EVI configuration specified by the `config_id`. -@@ -197,19 +211,16 @@ class AsyncRawChatClient: - - There are three ways to obtain the Chat Group ID: - -- - [Chat Metadata](/reference/speech-to-speech-evi/chat#receive.ChatMetadata): Upon establishing a WebSocket connection with EVI, the user receives a Chat Metadata message. This message contains a `chat_group_id`, which can be used to resume conversations within this chat group in future sessions. 
-+ - [Chat Metadata](/reference/empathic-voice-interface-evi/chat/chat#receive.Chat%20Metadata.type): Upon establishing a WebSocket connection with EVI, the user receives a Chat Metadata message. This message contains a `chat_group_id`, which can be used to resume conversations within this chat group in future sessions. - -- - [List Chats endpoint](/reference/speech-to-speech-evi/chats/list-chats): Use the GET `/v0/evi/chats` endpoint to obtain the Chat Group ID of individual Chat sessions. This endpoint lists all available Chat sessions and their associated Chat Group ID. -+ - [List Chats endpoint](/reference/empathic-voice-interface-evi/chats/list-chats): Use the GET `/v0/evi/chats` endpoint to obtain the Chat Group ID of individual Chat sessions. This endpoint lists all available Chat sessions and their associated Chat Group ID. - -- - [List Chat Groups endpoint](/reference/speech-to-speech-evi/chat-groups/list-chat-groups): Use the GET `/v0/evi/chat_groups` endpoint to obtain the Chat Group IDs of all Chat Groups associated with an API key. This endpoint returns a list of all available chat groups. -+ - [List Chat Groups endpoint](/reference/empathic-voice-interface-evi/chat-groups/list-chat-groups): Use the GET `/v0/evi/chat_groups` endpoint to obtain the Chat Group IDs of all Chat Groups associated with an API key. This endpoint returns a list of all available chat groups. - - verbose_transcription : typing.Optional[bool] -- A flag to enable verbose transcription. Set this query parameter to `true` to have unfinalized user transcripts be sent to the client as interim UserMessage messages. The [interim](/reference/speech-to-speech-evi/chat#receive.UserMessage.interim) field on a [UserMessage](/reference/speech-to-speech-evi/chat#receive.UserMessage) denotes whether the message is "interim" or "final." -+ A flag to enable verbose transcription. Set this query parameter to `"true"` to have unfinalized user transcripts be sent to the client as interim `UserMessage` messages. - - api_key : typing.Optional[str] -- API key used for authenticating the client. If not provided, an `access_token` must be provided to authenticate. -- -- For more details, refer to the [Authentication Strategies Guide](/docs/introduction/api-key#authentication-strategies). 
- - session_settings : ConnectSessionSettings - -@@ -224,6 +235,8 @@ class AsyncRawChatClient: - query_params = httpx.QueryParams() - if access_token is not None: - query_params = query_params.add("access_token", access_token) -+ if allow_connection is not None: -+ query_params = query_params.add("allow_connection", allow_connection) - if config_id is not None: - query_params = query_params.add("config_id", config_id) - if config_version is not None: -@@ -236,10 +249,18 @@ class AsyncRawChatClient: - query_params = query_params.add("verbose_transcription", verbose_transcription) - if api_key is not None: - query_params = query_params.add("api_key", api_key) -- if session_settings is not None: -- flattened_params = single_query_encoder("session_settings", session_settings) -- for param_key, param_value in flattened_params: -- query_params = query_params.add(param_key, str(param_value)) -+ if ( -+ convert_and_respect_annotation_metadata( -+ object_=session_settings, annotation=ConnectSessionSettings, direction="write" -+ ) -+ is not None -+ ): -+ query_params = query_params.add( -+ "session_settings", -+ convert_and_respect_annotation_metadata( -+ object_=session_settings, annotation=ConnectSessionSettings, direction="write" -+ ), -+ ) - ws_url = ws_url + f"?{query_params}" - headers = self._client_wrapper.get_headers() - if request_options and "additional_headers" in request_options: diff --git a/src/hume/empathic_voice/chat/socket_client.py.diff b/src/hume/empathic_voice/chat/socket_client.py.diff deleted file mode 100644 index f1d2621b..00000000 --- a/src/hume/empathic_voice/chat/socket_client.py.diff +++ /dev/null @@ -1,165 +0,0 @@ -diff --git a/src/hume/empathic_voice/chat/socket_client.py b/src/hume/empathic_voice/chat/socket_client.py -index d5ea6658..18ee74ab 100644 ---- a/src/hume/empathic_voice/chat/socket_client.py -+++ b/src/hume/empathic_voice/chat/socket_client.py -@@ -6,83 +6,18 @@ from json.decoder import JSONDecodeError - - import websockets - import websockets.sync.connection as websockets_sync_connection --from typing_extensions import deprecated --from contextlib import asynccontextmanager -- --from hume.empathic_voice.types.session_settings_message import SessionSettingsMessage -- - from ...core.events import EventEmitterMixin, EventType - from ...core.pydantic_utilities import parse_obj_as --from ..types.assistant_input import AssistantInput --from ..types.audio_input import AudioInput --from ..types.pause_assistant_message import PauseAssistantMessage --from ..types.resume_assistant_message import ResumeAssistantMessage --from ..types.session_settings import SessionSettings --from ..types.tool_error_message import ToolErrorMessage --from ..types.tool_response_message import ToolResponseMessage --from ..types.user_input import UserInput --from .types.publish_event import PublishEvent - from ..types.subscribe_event import SubscribeEvent -+from .types.publish_event import PublishEvent - - try: - from websockets.legacy.client import WebSocketClientProtocol # type: ignore - except ImportError: - from websockets import WebSocketClientProtocol # type: ignore - --ChatSocketClientResponse = SubscribeEvent -- --class ChatConnectSessionSettingsAudio(typing.TypedDict, total=False): -- channels: typing.Optional[int] -- encoding: typing.Optional[str] -- sample_rate: typing.Optional[int] -- -- --class ChatConnectSessionSettingsContext(typing.TypedDict, total=False): -- text: typing.Optional[str] -- -- --SessionSettingsVariablesValue = typing.Union[str, float, bool] -- --class 
ChatConnectSessionSettings(typing.TypedDict, total=False): -- audio: typing.Optional[ChatConnectSessionSettingsAudio] -- context: typing.Optional[ChatConnectSessionSettingsContext] -- custom_session_id: typing.Optional[str] -- event_limit: typing.Optional[int] -- language_model_api_key: typing.Optional[str] -- system_prompt: typing.Optional[str] -- variables: typing.Optional[typing.Dict[str, SessionSettingsVariablesValue]] -- voice_id: typing.Optional[str] -- --@deprecated("Use .connect() with kwargs instead.") --class ChatConnectOptions(typing.TypedDict, total=False): -- config_id: typing.Optional[str] -- """ -- The ID of the configuration. -- """ -- -- config_version: typing.Optional[str] -- """ -- The version of the configuration. -- """ -- -- api_key: typing.Optional[str] -- -- secret_key: typing.Optional[str] -- -- resumed_chat_group_id: typing.Optional[str] -- -- verbose_transcription: typing.Optional[bool] -+ChatSocketClientResponse = typing.Union[SubscribeEvent] - -- """ -- ID of the Voice to use for this chat. If specified, will override the voice set in the Config -- """ -- voice_id: typing.Optional[str] -- -- session_settings: typing.Optional[typing.Dict] -- """ -- Session settings to apply at connection time. Supports all SessionSettings fields except -- builtin_tools, type, metadata, and tools. Additionally supports event_limit. -- """ - - class AsyncChatSocketClient(EventEmitterMixin): - def __init__(self, *, websocket: WebSocketClientProtocol): -@@ -143,38 +78,6 @@ class AsyncChatSocketClient(EventEmitterMixin): - """ - await self._send(data.dict()) - -- @deprecated("Use send_publish instead.") -- async def send_audio_input(self, message: AudioInput) -> None: -- await self.send_publish(message) -- -- @deprecated("Use send_publish instead.") -- async def send_session_settings(self, message: SessionSettingsMessage) -> None: -- await self.send_publish(message) -- -- @deprecated("Use send_publish instead.") -- async def send_user_input(self, message: UserInput) -> None: -- await self.send_publish(message) -- -- @deprecated("Use send_publish instead.") -- async def send_assistant_input(self, message: AssistantInput) -> None: -- await self.send_publish(message) -- -- @deprecated("Use send_publish instead.") -- async def send_tool_response(self, message: ToolResponseMessage) -> None: -- await self.send_publish(message) -- -- @deprecated("Use send_publish instead.") -- async def send_tool_error(self, message: ToolErrorMessage) -> None: -- await self.send_publish(message) -- -- @deprecated("Use send_publish instead.") -- async def send_pause_assistant(self, message: PauseAssistantMessage) -> None: -- await self.send_publish(message) -- -- @deprecated("Use send_publish instead.") -- async def send_resume_assistant(self, message: ResumeAssistantMessage) -> None: -- await self.send_publish(message) -- - - class ChatSocketClient(EventEmitterMixin): - def __init__(self, *, websocket: websockets_sync_connection.Connection): -@@ -234,35 +137,3 @@ class ChatSocketClient(EventEmitterMixin): - Send a Pydantic model to the websocket connection. 
- """ - self._send(data.dict()) -- -- @deprecated("Use send_publish instead.") -- def send_audio_input(self, message: AudioInput) -> None: -- self.send_publish(message) -- -- @deprecated("Use send_publish instead.") -- def send_session_settings(self, message: SessionSettingsMessage) -> None: -- self.send_publish(message) -- -- @deprecated("Use send_publish instead.") -- def send_user_input(self, message: UserInput) -> None: -- self.send_publish(message) -- -- @deprecated("Use send_publish instead.") -- def send_assistant_input(self, message: AssistantInput) -> None: -- self.send_publish(message) -- -- @deprecated("Use send_publish instead.") -- def send_tool_response(self, message: ToolResponseMessage) -> None: -- self.send_publish(message) -- -- @deprecated("Use send_publish instead.") -- def send_tool_error(self, message: ToolErrorMessage) -> None: -- self.send_publish(message) -- -- @deprecated("Use send_publish instead.") -- def send_pause_assistant(self, message: PauseAssistantMessage) -> None: -- self.send_publish(message) -- -- @deprecated("Use send_publish instead.") -- def send_resume_assistant(self, message: ResumeAssistantMessage) -> None: -- self.send_publish(message) diff --git a/src/hume/empathic_voice/client.py.diff b/src/hume/empathic_voice/client.py.diff deleted file mode 100644 index f2b276f0..00000000 --- a/src/hume/empathic_voice/client.py.diff +++ /dev/null @@ -1,203 +0,0 @@ -diff --git a/src/hume/empathic_voice/client.py b/src/hume/empathic_voice/client.py -index e9119462..4fba8feb 100644 ---- a/src/hume/empathic_voice/client.py -+++ b/src/hume/empathic_voice/client.py -@@ -4,15 +4,15 @@ from __future__ import annotations - - import typing - --from hume.empathic_voice.chat.client import AsyncChatClient, ChatClient -- - from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper - from .raw_client import AsyncRawEmpathicVoiceClient, RawEmpathicVoiceClient - - if typing.TYPE_CHECKING: -+ from .chat.client import AsyncChatClient, ChatClient - from .chat_groups.client import AsyncChatGroupsClient, ChatGroupsClient - from .chats.client import AsyncChatsClient, ChatsClient - from .configs.client import AsyncConfigsClient, ConfigsClient -+ from .control_plane.client import AsyncControlPlaneClient, ControlPlaneClient - from .prompts.client import AsyncPromptsClient, PromptsClient - from .tools.client import AsyncToolsClient, ToolsClient - -@@ -21,11 +21,12 @@ class EmpathicVoiceClient: - def __init__(self, *, client_wrapper: SyncClientWrapper): - self._raw_client = RawEmpathicVoiceClient(client_wrapper=client_wrapper) - self._client_wrapper = client_wrapper -- self._tools: typing.Optional[ToolsClient] = None -- self._prompts: typing.Optional[PromptsClient] = None -- self._configs: typing.Optional[ConfigsClient] = None -- self._chats: typing.Optional[ChatsClient] = None -+ self._control_plane: typing.Optional[ControlPlaneClient] = None - self._chat_groups: typing.Optional[ChatGroupsClient] = None -+ self._chats: typing.Optional[ChatsClient] = None -+ self._configs: typing.Optional[ConfigsClient] = None -+ self._prompts: typing.Optional[PromptsClient] = None -+ self._tools: typing.Optional[ToolsClient] = None - self._chat: typing.Optional[ChatClient] = None - - @property -@@ -40,20 +41,28 @@ class EmpathicVoiceClient: - return self._raw_client - - @property -- def tools(self): -- if self._tools is None: -- from .tools.client import ToolsClient # noqa: E402 -+ def control_plane(self): -+ if self._control_plane is None: -+ from .control_plane.client import 
ControlPlaneClient # noqa: E402 - -- self._tools = ToolsClient(client_wrapper=self._client_wrapper) -- return self._tools -+ self._control_plane = ControlPlaneClient(client_wrapper=self._client_wrapper) -+ return self._control_plane - - @property -- def prompts(self): -- if self._prompts is None: -- from .prompts.client import PromptsClient # noqa: E402 -+ def chat_groups(self): -+ if self._chat_groups is None: -+ from .chat_groups.client import ChatGroupsClient # noqa: E402 - -- self._prompts = PromptsClient(client_wrapper=self._client_wrapper) -- return self._prompts -+ self._chat_groups = ChatGroupsClient(client_wrapper=self._client_wrapper) -+ return self._chat_groups -+ -+ @property -+ def chats(self): -+ if self._chats is None: -+ from .chats.client import ChatsClient # noqa: E402 -+ -+ self._chats = ChatsClient(client_wrapper=self._client_wrapper) -+ return self._chats - - @property - def configs(self): -@@ -64,31 +73,40 @@ class EmpathicVoiceClient: - return self._configs - - @property -- def chats(self): -- if self._chats is None: -- from .chats.client import ChatsClient # noqa: E402 -+ def prompts(self): -+ if self._prompts is None: -+ from .prompts.client import PromptsClient # noqa: E402 - -- self._chats = ChatsClient(client_wrapper=self._client_wrapper) -- return self._chats -+ self._prompts = PromptsClient(client_wrapper=self._client_wrapper) -+ return self._prompts - - @property -- def chat_groups(self): -- if self._chat_groups is None: -- from .chat_groups.client import ChatGroupsClient # noqa: E402 -+ def tools(self): -+ if self._tools is None: -+ from .tools.client import ToolsClient # noqa: E402 - -- self._chat_groups = ChatGroupsClient(client_wrapper=self._client_wrapper) -- return self._chat_groups -+ self._tools = ToolsClient(client_wrapper=self._client_wrapper) -+ return self._tools -+ -+ @property -+ def chat(self): -+ if self._chat is None: -+ from .chat.client import ChatClient # noqa: E402 -+ -+ self._chat = ChatClient(client_wrapper=self._client_wrapper) -+ return self._chat - - - class AsyncEmpathicVoiceClient: - def __init__(self, *, client_wrapper: AsyncClientWrapper): - self._raw_client = AsyncRawEmpathicVoiceClient(client_wrapper=client_wrapper) - self._client_wrapper = client_wrapper -- self._tools: typing.Optional[AsyncToolsClient] = None -- self._prompts: typing.Optional[AsyncPromptsClient] = None -- self._configs: typing.Optional[AsyncConfigsClient] = None -- self._chats: typing.Optional[AsyncChatsClient] = None -+ self._control_plane: typing.Optional[AsyncControlPlaneClient] = None - self._chat_groups: typing.Optional[AsyncChatGroupsClient] = None -+ self._chats: typing.Optional[AsyncChatsClient] = None -+ self._configs: typing.Optional[AsyncConfigsClient] = None -+ self._prompts: typing.Optional[AsyncPromptsClient] = None -+ self._tools: typing.Optional[AsyncToolsClient] = None - self._chat: typing.Optional[AsyncChatClient] = None - - @property -@@ -103,20 +121,28 @@ class AsyncEmpathicVoiceClient: - return self._raw_client - - @property -- def tools(self): -- if self._tools is None: -- from .tools.client import AsyncToolsClient # noqa: E402 -+ def control_plane(self): -+ if self._control_plane is None: -+ from .control_plane.client import AsyncControlPlaneClient # noqa: E402 - -- self._tools = AsyncToolsClient(client_wrapper=self._client_wrapper) -- return self._tools -+ self._control_plane = AsyncControlPlaneClient(client_wrapper=self._client_wrapper) -+ return self._control_plane - - @property -- def prompts(self): -- if self._prompts is None: -- 
from .prompts.client import AsyncPromptsClient # noqa: E402 -+ def chat_groups(self): -+ if self._chat_groups is None: -+ from .chat_groups.client import AsyncChatGroupsClient # noqa: E402 - -- self._prompts = AsyncPromptsClient(client_wrapper=self._client_wrapper) -- return self._prompts -+ self._chat_groups = AsyncChatGroupsClient(client_wrapper=self._client_wrapper) -+ return self._chat_groups -+ -+ @property -+ def chats(self): -+ if self._chats is None: -+ from .chats.client import AsyncChatsClient # noqa: E402 -+ -+ self._chats = AsyncChatsClient(client_wrapper=self._client_wrapper) -+ return self._chats - - @property - def configs(self): -@@ -127,20 +153,20 @@ class AsyncEmpathicVoiceClient: - return self._configs - - @property -- def chats(self): -- if self._chats is None: -- from .chats.client import AsyncChatsClient # noqa: E402 -+ def prompts(self): -+ if self._prompts is None: -+ from .prompts.client import AsyncPromptsClient # noqa: E402 - -- self._chats = AsyncChatsClient(client_wrapper=self._client_wrapper) -- return self._chats -+ self._prompts = AsyncPromptsClient(client_wrapper=self._client_wrapper) -+ return self._prompts - - @property -- def chat_groups(self): -- if self._chat_groups is None: -- from .chat_groups.client import AsyncChatGroupsClient # noqa: E402 -+ def tools(self): -+ if self._tools is None: -+ from .tools.client import AsyncToolsClient # noqa: E402 - -- self._chat_groups = AsyncChatGroupsClient(client_wrapper=self._client_wrapper) -- return self._chat_groups -+ self._tools = AsyncToolsClient(client_wrapper=self._client_wrapper) -+ return self._tools - - @property - def chat(self): diff --git a/src/hume/expression_measurement/batch/types/inference_job.py.diff b/src/hume/expression_measurement/batch/types/inference_job.py.diff deleted file mode 100644 index f2e72b13..00000000 --- a/src/hume/expression_measurement/batch/types/inference_job.py.diff +++ /dev/null @@ -1,24 +0,0 @@ -diff --git a/src/hume/expression_measurement/batch/types/inference_job.py b/src/hume/expression_measurement/batch/types/inference_job.py -index 08add412..83a68f84 100644 ---- a/src/hume/expression_measurement/batch/types/inference_job.py -+++ b/src/hume/expression_measurement/batch/types/inference_job.py -@@ -1,7 +1,6 @@ - # This file was auto-generated by Fern from our API Definition. - - import typing --from typing_extensions import deprecated - - import pydantic - from ....core.pydantic_utilities import IS_PYDANTIC_V2 -@@ -16,11 +15,6 @@ class InferenceJob(JobInference): - Jobs created with the Expression Measurement API will have this field set to `INFERENCE`. 
- """ - -- @property -- @deprecated("Use .state.status instead") -- def status(self) -> str: -- return self.state.status -- - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: diff --git a/src/hume/expression_measurement/client.py.diff b/src/hume/expression_measurement/client.py.diff deleted file mode 100644 index 612621be..00000000 --- a/src/hume/expression_measurement/client.py.diff +++ /dev/null @@ -1,70 +0,0 @@ -diff --git a/src/hume/expression_measurement/client.py b/src/hume/expression_measurement/client.py -index f75d9210..a7651e40 100644 ---- a/src/hume/expression_measurement/client.py -+++ b/src/hume/expression_measurement/client.py -@@ -8,16 +8,14 @@ from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper - from .raw_client import AsyncRawExpressionMeasurementClient, RawExpressionMeasurementClient - - if typing.TYPE_CHECKING: -- from .batch.client_with_utils import AsyncBatchClientWithUtils, BatchClientWithUtils -- from .stream.stream.client import StreamClient, AsyncStreamClient -+ from .batch.client import AsyncBatchClient, BatchClient - - - class ExpressionMeasurementClient: - def __init__(self, *, client_wrapper: SyncClientWrapper): - self._raw_client = RawExpressionMeasurementClient(client_wrapper=client_wrapper) - self._client_wrapper = client_wrapper -- self._batch: typing.Optional[BatchClientWithUtils] = None -- self._stream: typing.Optional[StreamClient] = None -+ self._batch: typing.Optional[BatchClient] = None - - @property - def with_raw_response(self) -> RawExpressionMeasurementClient: -@@ -33,25 +31,17 @@ class ExpressionMeasurementClient: - @property - def batch(self): - if self._batch is None: -- from .batch.client_with_utils import BatchClientWithUtils # noqa: E402 -+ from .batch.client import BatchClient # noqa: E402 - -- self._batch = BatchClientWithUtils(client_wrapper=self._client_wrapper) -+ self._batch = BatchClient(client_wrapper=self._client_wrapper) - return self._batch - -- @property -- def stream(self): -- if self._stream is None: -- from .stream.stream.client import StreamClient # noqa: E402 -- self._stream = StreamClient(client_wrapper=self._client_wrapper) -- return self._stream -- - - class AsyncExpressionMeasurementClient: - def __init__(self, *, client_wrapper: AsyncClientWrapper): - self._raw_client = AsyncRawExpressionMeasurementClient(client_wrapper=client_wrapper) - self._client_wrapper = client_wrapper -- self._batch: typing.Optional[AsyncBatchClientWithUtils] = None -- self._stream: typing.Optional[AsyncStreamClient] = None -+ self._batch: typing.Optional[AsyncBatchClient] = None - - @property - def with_raw_response(self) -> AsyncRawExpressionMeasurementClient: -@@ -67,15 +57,7 @@ class AsyncExpressionMeasurementClient: - @property - def batch(self): - if self._batch is None: -- from .batch.client_with_utils import AsyncBatchClientWithUtils # noqa: E402 -+ from .batch.client import AsyncBatchClient # noqa: E402 - -- self._batch = AsyncBatchClientWithUtils(client_wrapper=self._client_wrapper) -+ self._batch = AsyncBatchClient(client_wrapper=self._client_wrapper) - return self._batch -- -- @property -- def stream(self): -- if self._stream is None: -- from .stream.stream.client import AsyncStreamClient # noqa: E402 -- -- self._stream = AsyncStreamClient(client_wrapper=self._client_wrapper) -- return self._stream diff --git a/src/hume/expression_measurement/stream/stream/socket_client.py.diff 
b/src/hume/expression_measurement/stream/stream/socket_client.py.diff deleted file mode 100644 index ac65dfb4..00000000 --- a/src/hume/expression_measurement/stream/stream/socket_client.py.diff +++ /dev/null @@ -1,170 +0,0 @@ -diff --git a/src/hume/expression_measurement/stream/stream/socket_client.py b/src/hume/expression_measurement/stream/stream/socket_client.py -index fcd83929..85935e4e 100644 ---- a/src/hume/expression_measurement/stream/stream/socket_client.py -+++ b/src/hume/expression_measurement/stream/stream/socket_client.py -@@ -1,18 +1,13 @@ - # This file was auto-generated by Fern from our API Definition. - --import base64 - import json - import typing - from json.decoder import JSONDecodeError --from pathlib import Path - - import websockets - import websockets.sync.connection as websockets_sync_connection -- --from ....core.api_error import ApiError - from ....core.events import EventEmitterMixin, EventType - from ....core.pydantic_utilities import parse_obj_as --from .types.config import Config - from .types.stream_models_endpoint_payload import StreamModelsEndpointPayload - from .types.subscribe_event import SubscribeEvent - -@@ -83,74 +78,6 @@ class AsyncStreamSocketClient(EventEmitterMixin): - """ - await self._send(data.dict()) - -- async def send_facemesh( -- self, -- landmarks: typing.List[typing.List[typing.List[float]]], -- config: typing.Optional[Config] = None, -- payload_id: typing.Optional[str] = None, -- ) -> StreamSocketClientResponse: -- landmarks_str = json.dumps(landmarks) -- payload = { -- "data": landmarks_str, -- "models": config.dict() if config else None, -- "raw_text": False, -- "payload_id": payload_id, -- } -- payload = {k: v for k, v in payload.items() if v is not None} -- await self._websocket.send(json.dumps(payload)) -- return await self.recv() -- -- async def send_text( -- self, -- text: str, -- config: typing.Optional[Config] = None, -- payload_id: typing.Optional[str] = None, -- ) -> StreamSocketClientResponse: -- payload = { -- "data": text, -- "models": config.dict() if config else None, -- "raw_text": True, -- "payload_id": payload_id, -- } -- payload = {k: v for k, v in payload.items() if v is not None} -- await self._websocket.send(json.dumps(payload)) -- return await self.recv() -- -- async def send_file( -- self, -- file_: typing.Union[str, Path], -- config: typing.Optional[Config] = None, -- payload_id: typing.Optional[str] = None, -- ) -> StreamSocketClientResponse: -- try: -- with open(file_, "rb") as f: -- bytes_data = base64.b64encode(f.read()).decode() -- except: -- if isinstance(file_, Path): -- raise ApiError(body=f"Failed to open file: {file_}") -- # If you cannot open the file, assume you were passed a b64 string, not a file path -- bytes_data = str(file_) -- -- payload = { -- "data": bytes_data, -- "models": config.dict() if config else None, -- "raw_text": False, -- "payload_id": payload_id, -- } -- payload = {k: v for k, v in payload.items() if v is not None} -- await self._websocket.send(json.dumps(payload)) -- return await self.recv() -- -- async def get_job_details(self) -> StreamSocketClientResponse: -- payload = {"job_details": True} -- await self._websocket.send(json.dumps(payload)) -- return await self.recv() -- -- async def reset(self) -> StreamSocketClientResponse: -- payload = {"reset_stream": True} -- await self._websocket.send(json.dumps(payload)) -- return await self.recv() -- - - class StreamSocketClient(EventEmitterMixin): - def __init__(self, *, websocket: websockets_sync_connection.Connection): -@@ -210,71 
+137,3 @@ class StreamSocketClient(EventEmitterMixin): - Send a Pydantic model to the websocket connection. - """ - self._send(data.dict()) -- -- def send_facemesh( -- self, -- landmarks: typing.List[typing.List[typing.List[float]]], -- config: typing.Optional[Config] = None, -- payload_id: typing.Optional[str] = None, -- ) -> StreamSocketClientResponse: -- landmarks_str = json.dumps(landmarks) -- payload = { -- "data": landmarks_str, -- "models": config.dict() if config else None, -- "raw_text": False, -- "payload_id": payload_id, -- } -- payload = {k: v for k, v in payload.items() if v is not None} -- self._websocket.send(json.dumps(payload)) -- return self.recv() -- -- def send_text( -- self, -- text: str, -- config: typing.Optional[Config] = None, -- payload_id: typing.Optional[str] = None, -- ) -> StreamSocketClientResponse: -- payload = { -- "data": text, -- "models": config.dict() if config else None, -- "raw_text": True, -- "payload_id": payload_id, -- } -- payload = {k: v for k, v in payload.items() if v is not None} -- self._websocket.send(json.dumps(payload)) -- return self.recv() -- -- def send_file( -- self, -- file_: typing.Union[str, Path], -- config: typing.Optional[Config] = None, -- payload_id: typing.Optional[str] = None, -- ) -> StreamSocketClientResponse: -- try: -- with open(file_, "rb") as f: -- bytes_data = base64.b64encode(f.read()).decode() -- except: -- if isinstance(file_, Path): -- raise ApiError(body=f"Failed to open file: {file_}") -- # If you cannot open the file, assume you were passed a b64 string, not a file path -- bytes_data = str(file_) -- -- payload = { -- "data": bytes_data, -- "models": config.dict() if config else None, -- "raw_text": False, -- "payload_id": payload_id, -- } -- payload = {k: v for k, v in payload.items() if v is not None} -- self._websocket.send(json.dumps(payload)) -- return self.recv() -- -- def get_job_details(self) -> StreamSocketClientResponse: -- payload = {"job_details": True} -- self._websocket.send(json.dumps(payload)) -- return self.recv() -- -- def reset(self) -> StreamSocketClientResponse: -- payload = {"reset_stream": True} -- self._websocket.send(json.dumps(payload)) -- return self.recv() diff --git a/tests/wire/test_tts.py b/tests/wire/test_tts.py index 4ee2aa69..55d33942 100644 --- a/tests/wire/test_tts.py +++ b/tests/wire/test_tts.py @@ -23,8 +23,7 @@ def test_tts_synthesize_file() -> None: """Test synthesize-file endpoint with WireMock""" test_id = "tts.synthesize_file.0" client = get_client(test_id) - # Must consume the iterator to trigger the HTTP request - list(client.tts.synthesize_file( + client.tts.synthesize_file( context={"generation_id": "09ad914d-8e7f-40f8-a279-e34f07f7dab2"}, format={"type": "mp3"}, num_generations=1, @@ -34,7 +33,7 @@ def test_tts_synthesize_file() -> None: "description": "Middle-aged masculine voice with a clear, rhythmic Scots lilt, rounded vowels, and a warm, steady tone with an articulate, academic quality.", } ], - )) + ) verify_request_count(test_id, "POST", "/v0/tts/file", None, 1) @@ -42,15 +41,14 @@ def test_tts_synthesize_file_streaming() -> None: """Test synthesize-file-streaming endpoint with WireMock""" test_id = "tts.synthesize_file_streaming.0" client = get_client(test_id) - # Must consume the iterator to trigger the HTTP request - list(client.tts.synthesize_file_streaming( + client.tts.synthesize_file_streaming( utterances=[ { "text": "Beauty is no quality in things themselves: It exists merely in the mind which contemplates them.", "voice": {"provider": "HUME_AI"}, } ] 
- )) + ) verify_request_count(test_id, "POST", "/v0/tts/stream/file", None, 1) @@ -58,15 +56,14 @@ def test_tts_synthesize_json_streaming() -> None: """Test synthesize-json-streaming endpoint with WireMock""" test_id = "tts.synthesize_json_streaming.0" client = get_client(test_id) - # Must consume the iterator to trigger the HTTP request - list(client.tts.synthesize_json_streaming( + client.tts.synthesize_json_streaming( utterances=[ { "text": "Beauty is no quality in things themselves: It exists merely in the mind which contemplates them.", "voice": {"provider": "HUME_AI"}, } ] - )) + ) verify_request_count(test_id, "POST", "/v0/tts/stream/json", None, 1) @@ -74,6 +71,5 @@ def test_tts_convert_voice_json() -> None: """Test convertVoiceJson endpoint with WireMock""" test_id = "tts.convert_voice_json.0" client = get_client(test_id) - # Must consume the iterator to trigger the HTTP request - list(client.tts.convert_voice_json(audio="example_audio")) + client.tts.convert_voice_json(audio="example_audio") verify_request_count(test_id, "POST", "/v0/tts/voice_conversion/json", None, 1) From c8d923ea50d4e4cdb4ce0689e2a622c50d0501b2 Mon Sep 17 00:00:00 2001 From: twitchard Date: Tue, 13 Jan 2026 13:31:22 -0600 Subject: [PATCH 2/2] Add .diff files for fern-ignored files --- src/hume/empathic_voice/chat/client.py.diff | 87 +++++++++ .../empathic_voice/chat/raw_client.py.diff | 176 ++++++++++++++++++ .../empathic_voice/chat/socket_client.py.diff | 163 ++++++++++++++++ .../batch/types/inference_job.py.diff | 24 +++ .../expression_measurement/client.py.diff | 70 +++++++ .../stream/stream/socket_client.py.diff | 170 +++++++++++++++++ src/hume/tts/client.py.diff | 1 + 7 files changed, 691 insertions(+) create mode 100644 src/hume/empathic_voice/chat/client.py.diff create mode 100644 src/hume/empathic_voice/chat/raw_client.py.diff create mode 100644 src/hume/empathic_voice/chat/socket_client.py.diff create mode 100644 src/hume/expression_measurement/batch/types/inference_job.py.diff create mode 100644 src/hume/expression_measurement/client.py.diff create mode 100644 src/hume/expression_measurement/stream/stream/socket_client.py.diff create mode 100644 src/hume/tts/client.py.diff diff --git a/src/hume/empathic_voice/chat/client.py.diff b/src/hume/empathic_voice/chat/client.py.diff new file mode 100644 index 00000000..aa685fdd --- /dev/null +++ b/src/hume/empathic_voice/chat/client.py.diff @@ -0,0 +1,87 @@ +diff --git a/src/hume/empathic_voice/chat/client.py b/src/hume/empathic_voice/chat/client.py +index 43a9cf28..8ec7e4cd 100644 +--- a/src/hume/empathic_voice/chat/client.py ++++ b/src/hume/empathic_voice/chat/client.py +@@ -10,7 +10,6 @@ from ...core.api_error import ApiError + from ...core.client_wrapper import AsyncClientWrapper, SyncClientWrapper + from ...core.request_options import RequestOptions + from ...core.serialization import convert_and_respect_annotation_metadata +-from ...core.query_encoder import single_query_encoder + from ..types.connect_session_settings import ConnectSessionSettings + from .raw_client import AsyncRawChatClient, RawChatClient + from .socket_client import AsyncChatSocketClient, ChatSocketClient +@@ -48,7 +47,7 @@ class ChatClient: + resumed_chat_group_id: typing.Optional[str] = None, + verbose_transcription: typing.Optional[bool] = None, + api_key: typing.Optional[str] = None, +- session_settings: typing.Optional[ConnectSessionSettings] = None, ++ session_settings: ConnectSessionSettings, + request_options: typing.Optional[RequestOptions] = None, + ) -> 
typing.Iterator[ChatSocketClient]: + """ +@@ -130,10 +129,18 @@ class ChatClient: + query_params = query_params.add("verbose_transcription", verbose_transcription) + if api_key is not None: + query_params = query_params.add("api_key", api_key) +- if session_settings is not None: +- flattened_params = single_query_encoder("session_settings", session_settings) +- for param_key, param_value in flattened_params: +- query_params = query_params.add(param_key, param_value) ++ if ( ++ convert_and_respect_annotation_metadata( ++ object_=session_settings, annotation=ConnectSessionSettings, direction="write" ++ ) ++ is not None ++ ): ++ query_params = query_params.add( ++ "session_settings", ++ convert_and_respect_annotation_metadata( ++ object_=session_settings, annotation=ConnectSessionSettings, direction="write" ++ ), ++ ) + ws_url = ws_url + f"?{query_params}" + headers = self._raw_client._client_wrapper.get_headers() + if request_options and "additional_headers" in request_options: +@@ -183,7 +190,7 @@ class AsyncChatClient: + resumed_chat_group_id: typing.Optional[str] = None, + verbose_transcription: typing.Optional[bool] = None, + api_key: typing.Optional[str] = None, +- session_settings: typing.Optional[ConnectSessionSettings] = None, ++ session_settings: ConnectSessionSettings, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.AsyncIterator[AsyncChatSocketClient]: + """ +@@ -238,7 +245,7 @@ class AsyncChatClient: + + api_key : typing.Optional[str] + +- session_settings : typing.Optional[ConnectSessionSettings] ++ session_settings : ConnectSessionSettings + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. +@@ -265,12 +272,18 @@ class AsyncChatClient: + query_params = query_params.add("verbose_transcription", verbose_transcription) + if api_key is not None: + query_params = query_params.add("api_key", api_key) +- +- if session_settings is not None: +- flattened_params = single_query_encoder("session_settings", session_settings) +- for param_key, param_value in flattened_params: +- query_params = query_params.add(param_key, param_value) +- ++ if ( ++ convert_and_respect_annotation_metadata( ++ object_=session_settings, annotation=ConnectSessionSettings, direction="write" ++ ) ++ is not None ++ ): ++ query_params = query_params.add( ++ "session_settings", ++ convert_and_respect_annotation_metadata( ++ object_=session_settings, annotation=ConnectSessionSettings, direction="write" ++ ), ++ ) + ws_url = ws_url + f"?{query_params}" + headers = self._raw_client._client_wrapper.get_headers() + if request_options and "additional_headers" in request_options: diff --git a/src/hume/empathic_voice/chat/raw_client.py.diff b/src/hume/empathic_voice/chat/raw_client.py.diff new file mode 100644 index 00000000..14716f92 --- /dev/null +++ b/src/hume/empathic_voice/chat/raw_client.py.diff @@ -0,0 +1,176 @@ +diff --git a/src/hume/empathic_voice/chat/raw_client.py b/src/hume/empathic_voice/chat/raw_client.py +index fefee870..23e2b4a0 100644 +--- a/src/hume/empathic_voice/chat/raw_client.py ++++ b/src/hume/empathic_voice/chat/raw_client.py +@@ -10,7 +10,6 @@ from ...core.api_error import ApiError + from ...core.client_wrapper import AsyncClientWrapper, SyncClientWrapper + from ...core.request_options import RequestOptions + from ...core.serialization import convert_and_respect_annotation_metadata +-from ...core.query_encoder import single_query_encoder + from ..types.connect_session_settings import ConnectSessionSettings + from .socket_client import 
AsyncChatSocketClient, ChatSocketClient + +@@ -29,6 +28,7 @@ class RawChatClient: + self, + *, + access_token: typing.Optional[str] = None, ++ allow_connection: typing.Optional[bool] = None, + config_id: typing.Optional[str] = None, + config_version: typing.Optional[int] = None, + event_limit: typing.Optional[int] = None, +@@ -50,12 +50,15 @@ class RawChatClient: + + For more details, refer to the [Authentication Strategies Guide](/docs/introduction/api-key#authentication-strategies). + ++ allow_connection : typing.Optional[bool] ++ Allows external connections to this chat via the /connect endpoint. ++ + config_id : typing.Optional[str] + The unique identifier for an EVI configuration. + +- Include this ID in your connection request to equip EVI with the Prompt, Language Model, Voice, and Tools associated with the specified configuration. If omitted, EVI will apply [default configuration settings](/docs/speech-to-speech-evi/configuration/build-a-configuration#default-configuration). ++ Include this ID in your connection request to equip EVI with the Prompt, Language Model, Voice, and Tools associated with the specified configuration. If omitted, EVI will apply [default configuration settings](/docs/empathic-voice-interface-evi/configuration#default-configuration). + +- For help obtaining this ID, see our [Configuration Guide](/docs/speech-to-speech-evi/configuration). ++ For help obtaining this ID, see our [Configuration Guide](/docs/empathic-voice-interface-evi/configuration). + + config_version : typing.Optional[int] + The version number of the EVI configuration specified by the `config_id`. +@@ -76,19 +79,16 @@ class RawChatClient: + + There are three ways to obtain the Chat Group ID: + +- - [Chat Metadata](/reference/speech-to-speech-evi/chat#receive.ChatMetadata): Upon establishing a WebSocket connection with EVI, the user receives a Chat Metadata message. This message contains a `chat_group_id`, which can be used to resume conversations within this chat group in future sessions. ++ - [Chat Metadata](/reference/empathic-voice-interface-evi/chat/chat#receive.Chat%20Metadata.type): Upon establishing a WebSocket connection with EVI, the user receives a Chat Metadata message. This message contains a `chat_group_id`, which can be used to resume conversations within this chat group in future sessions. + +- - [List Chats endpoint](/reference/speech-to-speech-evi/chats/list-chats): Use the GET `/v0/evi/chats` endpoint to obtain the Chat Group ID of individual Chat sessions. This endpoint lists all available Chat sessions and their associated Chat Group ID. ++ - [List Chats endpoint](/reference/empathic-voice-interface-evi/chats/list-chats): Use the GET `/v0/evi/chats` endpoint to obtain the Chat Group ID of individual Chat sessions. This endpoint lists all available Chat sessions and their associated Chat Group ID. + +- - [List Chat Groups endpoint](/reference/speech-to-speech-evi/chat-groups/list-chat-groups): Use the GET `/v0/evi/chat_groups` endpoint to obtain the Chat Group IDs of all Chat Groups associated with an API key. This endpoint returns a list of all available chat groups. ++ - [List Chat Groups endpoint](/reference/empathic-voice-interface-evi/chat-groups/list-chat-groups): Use the GET `/v0/evi/chat_groups` endpoint to obtain the Chat Group IDs of all Chat Groups associated with an API key. This endpoint returns a list of all available chat groups. + + verbose_transcription : typing.Optional[bool] +- A flag to enable verbose transcription. 
Set this query parameter to `true` to have unfinalized user transcripts be sent to the client as interim UserMessage messages. The [interim](/reference/speech-to-speech-evi/chat#receive.UserMessage.interim) field on a [UserMessage](/reference/speech-to-speech-evi/chat#receive.UserMessage) denotes whether the message is "interim" or "final." ++ A flag to enable verbose transcription. Set this query parameter to `"true"` to have unfinalized user transcripts be sent to the client as interim `UserMessage` messages. + + api_key : typing.Optional[str] +- API key used for authenticating the client. If not provided, an `access_token` must be provided to authenticate. +- +- For more details, refer to the [Authentication Strategies Guide](/docs/introduction/api-key#authentication-strategies). + + session_settings : ConnectSessionSettings + +@@ -103,6 +103,8 @@ class RawChatClient: + query_params = httpx.QueryParams() + if access_token is not None: + query_params = query_params.add("access_token", access_token) ++ if allow_connection is not None: ++ query_params = query_params.add("allow_connection", allow_connection) + if config_id is not None: + query_params = query_params.add("config_id", config_id) + if config_version is not None: +@@ -115,10 +117,18 @@ class RawChatClient: + query_params = query_params.add("verbose_transcription", verbose_transcription) + if api_key is not None: + query_params = query_params.add("api_key", api_key) +- if session_settings is not None: +- flattened_params = single_query_encoder("session_settings", session_settings) +- for param_key, param_value in flattened_params: +- query_params = query_params.add(param_key, str(param_value)) ++ if ( ++ convert_and_respect_annotation_metadata( ++ object_=session_settings, annotation=ConnectSessionSettings, direction="write" ++ ) ++ is not None ++ ): ++ query_params = query_params.add( ++ "session_settings", ++ convert_and_respect_annotation_metadata( ++ object_=session_settings, annotation=ConnectSessionSettings, direction="write" ++ ), ++ ) + ws_url = ws_url + f"?{query_params}" + headers = self._client_wrapper.get_headers() + if request_options and "additional_headers" in request_options: +@@ -150,6 +160,7 @@ class AsyncRawChatClient: + self, + *, + access_token: typing.Optional[str] = None, ++ allow_connection: typing.Optional[bool] = None, + config_id: typing.Optional[str] = None, + config_version: typing.Optional[int] = None, + event_limit: typing.Optional[int] = None, +@@ -171,12 +182,15 @@ class AsyncRawChatClient: + + For more details, refer to the [Authentication Strategies Guide](/docs/introduction/api-key#authentication-strategies). + ++ allow_connection : typing.Optional[bool] ++ Allows external connections to this chat via the /connect endpoint. ++ + config_id : typing.Optional[str] + The unique identifier for an EVI configuration. + +- Include this ID in your connection request to equip EVI with the Prompt, Language Model, Voice, and Tools associated with the specified configuration. If omitted, EVI will apply [default configuration settings](/docs/speech-to-speech-evi/configuration/build-a-configuration#default-configuration). ++ Include this ID in your connection request to equip EVI with the Prompt, Language Model, Voice, and Tools associated with the specified configuration. If omitted, EVI will apply [default configuration settings](/docs/empathic-voice-interface-evi/configuration#default-configuration). + +- For help obtaining this ID, see our [Configuration Guide](/docs/speech-to-speech-evi/configuration). 
++ For help obtaining this ID, see our [Configuration Guide](/docs/empathic-voice-interface-evi/configuration). + + config_version : typing.Optional[int] + The version number of the EVI configuration specified by the `config_id`. +@@ -197,19 +211,16 @@ class AsyncRawChatClient: + + There are three ways to obtain the Chat Group ID: + +- - [Chat Metadata](/reference/speech-to-speech-evi/chat#receive.ChatMetadata): Upon establishing a WebSocket connection with EVI, the user receives a Chat Metadata message. This message contains a `chat_group_id`, which can be used to resume conversations within this chat group in future sessions. ++ - [Chat Metadata](/reference/empathic-voice-interface-evi/chat/chat#receive.Chat%20Metadata.type): Upon establishing a WebSocket connection with EVI, the user receives a Chat Metadata message. This message contains a `chat_group_id`, which can be used to resume conversations within this chat group in future sessions. + +- - [List Chats endpoint](/reference/speech-to-speech-evi/chats/list-chats): Use the GET `/v0/evi/chats` endpoint to obtain the Chat Group ID of individual Chat sessions. This endpoint lists all available Chat sessions and their associated Chat Group ID. ++ - [List Chats endpoint](/reference/empathic-voice-interface-evi/chats/list-chats): Use the GET `/v0/evi/chats` endpoint to obtain the Chat Group ID of individual Chat sessions. This endpoint lists all available Chat sessions and their associated Chat Group ID. + +- - [List Chat Groups endpoint](/reference/speech-to-speech-evi/chat-groups/list-chat-groups): Use the GET `/v0/evi/chat_groups` endpoint to obtain the Chat Group IDs of all Chat Groups associated with an API key. This endpoint returns a list of all available chat groups. ++ - [List Chat Groups endpoint](/reference/empathic-voice-interface-evi/chat-groups/list-chat-groups): Use the GET `/v0/evi/chat_groups` endpoint to obtain the Chat Group IDs of all Chat Groups associated with an API key. This endpoint returns a list of all available chat groups. + + verbose_transcription : typing.Optional[bool] +- A flag to enable verbose transcription. Set this query parameter to `true` to have unfinalized user transcripts be sent to the client as interim UserMessage messages. The [interim](/reference/speech-to-speech-evi/chat#receive.UserMessage.interim) field on a [UserMessage](/reference/speech-to-speech-evi/chat#receive.UserMessage) denotes whether the message is "interim" or "final." ++ A flag to enable verbose transcription. Set this query parameter to `"true"` to have unfinalized user transcripts be sent to the client as interim `UserMessage` messages. + + api_key : typing.Optional[str] +- API key used for authenticating the client. If not provided, an `access_token` must be provided to authenticate. +- +- For more details, refer to the [Authentication Strategies Guide](/docs/introduction/api-key#authentication-strategies). 
+ + session_settings : ConnectSessionSettings + +@@ -224,6 +235,8 @@ class AsyncRawChatClient: + query_params = httpx.QueryParams() + if access_token is not None: + query_params = query_params.add("access_token", access_token) ++ if allow_connection is not None: ++ query_params = query_params.add("allow_connection", allow_connection) + if config_id is not None: + query_params = query_params.add("config_id", config_id) + if config_version is not None: +@@ -236,10 +249,18 @@ class AsyncRawChatClient: + query_params = query_params.add("verbose_transcription", verbose_transcription) + if api_key is not None: + query_params = query_params.add("api_key", api_key) +- if session_settings is not None: +- flattened_params = single_query_encoder("session_settings", session_settings) +- for param_key, param_value in flattened_params: +- query_params = query_params.add(param_key, str(param_value)) ++ if ( ++ convert_and_respect_annotation_metadata( ++ object_=session_settings, annotation=ConnectSessionSettings, direction="write" ++ ) ++ is not None ++ ): ++ query_params = query_params.add( ++ "session_settings", ++ convert_and_respect_annotation_metadata( ++ object_=session_settings, annotation=ConnectSessionSettings, direction="write" ++ ), ++ ) + ws_url = ws_url + f"?{query_params}" + headers = self._client_wrapper.get_headers() + if request_options and "additional_headers" in request_options: diff --git a/src/hume/empathic_voice/chat/socket_client.py.diff b/src/hume/empathic_voice/chat/socket_client.py.diff new file mode 100644 index 00000000..759d2c9b --- /dev/null +++ b/src/hume/empathic_voice/chat/socket_client.py.diff @@ -0,0 +1,163 @@ +diff --git a/src/hume/empathic_voice/chat/socket_client.py b/src/hume/empathic_voice/chat/socket_client.py +index 9aaa1956..18ee74ab 100644 +--- a/src/hume/empathic_voice/chat/socket_client.py ++++ b/src/hume/empathic_voice/chat/socket_client.py +@@ -6,81 +6,18 @@ from json.decoder import JSONDecodeError + + import websockets + import websockets.sync.connection as websockets_sync_connection +-from typing_extensions import deprecated +-from contextlib import asynccontextmanager +- + from ...core.events import EventEmitterMixin, EventType + from ...core.pydantic_utilities import parse_obj_as +-from ..types.assistant_input import AssistantInput +-from ..types.audio_input import AudioInput +-from ..types.pause_assistant_message import PauseAssistantMessage +-from ..types.resume_assistant_message import ResumeAssistantMessage +-from ..types.session_settings import SessionSettings +-from ..types.tool_error_message import ToolErrorMessage +-from ..types.tool_response_message import ToolResponseMessage +-from ..types.user_input import UserInput +-from .types.publish_event import PublishEvent + from ..types.subscribe_event import SubscribeEvent ++from .types.publish_event import PublishEvent + + try: + from websockets.legacy.client import WebSocketClientProtocol # type: ignore + except ImportError: + from websockets import WebSocketClientProtocol # type: ignore + +-ChatSocketClientResponse = SubscribeEvent +- +-class ChatConnectSessionSettingsAudio(typing.TypedDict, total=False): +- channels: typing.Optional[int] +- encoding: typing.Optional[str] +- sample_rate: typing.Optional[int] +- +- +-class ChatConnectSessionSettingsContext(typing.TypedDict, total=False): +- text: typing.Optional[str] +- +- +-SessionSettingsVariablesValue = typing.Union[str, float, bool] +- +-class ChatConnectSessionSettings(typing.TypedDict, total=False): +- audio: 
typing.Optional[ChatConnectSessionSettingsAudio]
+-    context: typing.Optional[ChatConnectSessionSettingsContext]
+-    custom_session_id: typing.Optional[str]
+-    event_limit: typing.Optional[int]
+-    language_model_api_key: typing.Optional[str]
+-    system_prompt: typing.Optional[str]
+-    variables: typing.Optional[typing.Dict[str, SessionSettingsVariablesValue]]
+-    voice_id: typing.Optional[str]
+-
+-@deprecated("Use .connect() with kwargs instead.")
+-class ChatConnectOptions(typing.TypedDict, total=False):
+-    config_id: typing.Optional[str]
+-    """
+-    The ID of the configuration.
+-    """
+-
+-    config_version: typing.Optional[str]
+-    """
+-    The version of the configuration.
+-    """
+-
+-    api_key: typing.Optional[str]
+-
+-    secret_key: typing.Optional[str]
+-
+-    resumed_chat_group_id: typing.Optional[str]
+-
+-    verbose_transcription: typing.Optional[bool]
+-
+-    """
+-    ID of the Voice to use for this chat. If specified, will override the voice set in the Config
+-    """
+-    voice_id: typing.Optional[str]
++ChatSocketClientResponse = typing.Union[SubscribeEvent]
+
+-    session_settings: typing.Optional[typing.Dict]
+-    """
+-    Session settings to apply at connection time. Supports all SessionSettings fields except
+-    builtin_tools, type, metadata, and tools. Additionally supports event_limit.
+-    """
+
+ class AsyncChatSocketClient(EventEmitterMixin):
+     def __init__(self, *, websocket: WebSocketClientProtocol):
+@@ -141,38 +78,6 @@ class AsyncChatSocketClient(EventEmitterMixin):
+         """
+         await self._send(data.dict())
+
+-    @deprecated("Use send_publish instead.")
+-    async def send_audio_input(self, message: AudioInput) -> None:
+-        await self.send_publish(message)
+-
+-    @deprecated("Use send_publish instead.")
+-    async def send_session_settings(self, message: SessionSettings) -> None:
+-        await self.send_publish(message)
+-
+-    @deprecated("Use send_publish instead.")
+-    async def send_user_input(self, message: UserInput) -> None:
+-        await self.send_publish(message)
+-
+-    @deprecated("Use send_publish instead.")
+-    async def send_assistant_input(self, message: AssistantInput) -> None:
+-        await self.send_publish(message)
+-
+-    @deprecated("Use send_publish instead.")
+-    async def send_tool_response(self, message: ToolResponseMessage) -> None:
+-        await self.send_publish(message)
+-
+-    @deprecated("Use send_publish instead.")
+-    async def send_tool_error(self, message: ToolErrorMessage) -> None:
+-        await self.send_publish(message)
+-
+-    @deprecated("Use send_publish instead.")
+-    async def send_pause_assistant(self, message: PauseAssistantMessage) -> None:
+-        await self.send_publish(message)
+-
+-    @deprecated("Use send_publish instead.")
+-    async def send_resume_assistant(self, message: ResumeAssistantMessage) -> None:
+-        await self.send_publish(message)
+-
+
+ class ChatSocketClient(EventEmitterMixin):
+     def __init__(self, *, websocket: websockets_sync_connection.Connection):
+@@ -232,35 +137,3 @@ class ChatSocketClient(EventEmitterMixin):
+         Send a Pydantic model to the websocket connection.
+         """
+         self._send(data.dict())
+-
+-    @deprecated("Use send_publish instead.")
+-    def send_audio_input(self, message: AudioInput) -> None:
+-        self.send_publish(message)
+-
+-    @deprecated("Use send_publish instead.")
+-    def send_session_settings(self, message: SessionSettings) -> None:
+-        self.send_publish(message)
+-
+-    @deprecated("Use send_publish instead.")
+-    def send_user_input(self, message: UserInput) -> None:
+-        self.send_publish(message)
+-
+-    @deprecated("Use send_publish instead.")
+-    def send_assistant_input(self, message: AssistantInput) -> None:
+-        self.send_publish(message)
+-
+-    @deprecated("Use send_publish instead.")
+-    def send_tool_response(self, message: ToolResponseMessage) -> None:
+-        self.send_publish(message)
+-
+-    @deprecated("Use send_publish instead.")
+-    def send_tool_error(self, message: ToolErrorMessage) -> None:
+-        self.send_publish(message)
+-
+-    @deprecated("Use send_publish instead.")
+-    def send_pause_assistant(self, message: PauseAssistantMessage) -> None:
+-        self.send_publish(message)
+-
+-    @deprecated("Use send_publish instead.")
+-    def send_resume_assistant(self, message: ResumeAssistantMessage) -> None:
+-        self.send_publish(message)
diff --git a/src/hume/expression_measurement/batch/types/inference_job.py.diff b/src/hume/expression_measurement/batch/types/inference_job.py.diff
new file mode 100644
index 00000000..f2e72b13
--- /dev/null
+++ b/src/hume/expression_measurement/batch/types/inference_job.py.diff
@@ -0,0 +1,24 @@
+diff --git a/src/hume/expression_measurement/batch/types/inference_job.py b/src/hume/expression_measurement/batch/types/inference_job.py
+index 08add412..83a68f84 100644
+--- a/src/hume/expression_measurement/batch/types/inference_job.py
++++ b/src/hume/expression_measurement/batch/types/inference_job.py
+@@ -1,7 +1,6 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import typing
+-from typing_extensions import deprecated
+
+ import pydantic
+ from ....core.pydantic_utilities import IS_PYDANTIC_V2
+@@ -16,11 +15,6 @@ class InferenceJob(JobInference):
+     Jobs created with the Expression Measurement API will have this field set to `INFERENCE`.
+     """
+
+-    @property
+-    @deprecated("Use .state.status instead")
+-    def status(self) -> str:
+-        return self.state.status
+-
+     if IS_PYDANTIC_V2:
+         model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
+     else:
diff --git a/src/hume/expression_measurement/client.py.diff b/src/hume/expression_measurement/client.py.diff
new file mode 100644
index 00000000..612621be
--- /dev/null
+++ b/src/hume/expression_measurement/client.py.diff
@@ -0,0 +1,70 @@
+diff --git a/src/hume/expression_measurement/client.py b/src/hume/expression_measurement/client.py
+index f75d9210..a7651e40 100644
+--- a/src/hume/expression_measurement/client.py
++++ b/src/hume/expression_measurement/client.py
+@@ -8,16 +8,14 @@ from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
+ from .raw_client import AsyncRawExpressionMeasurementClient, RawExpressionMeasurementClient
+
+ if typing.TYPE_CHECKING:
+-    from .batch.client_with_utils import AsyncBatchClientWithUtils, BatchClientWithUtils
+-    from .stream.stream.client import StreamClient, AsyncStreamClient
++    from .batch.client import AsyncBatchClient, BatchClient
+
+
+ class ExpressionMeasurementClient:
+     def __init__(self, *, client_wrapper: SyncClientWrapper):
+         self._raw_client = RawExpressionMeasurementClient(client_wrapper=client_wrapper)
+         self._client_wrapper = client_wrapper
+-        self._batch: typing.Optional[BatchClientWithUtils] = None
+-        self._stream: typing.Optional[StreamClient] = None
++        self._batch: typing.Optional[BatchClient] = None
+
+     @property
+     def with_raw_response(self) -> RawExpressionMeasurementClient:
+@@ -33,25 +31,17 @@ class ExpressionMeasurementClient:
+     @property
+     def batch(self):
+         if self._batch is None:
+-            from .batch.client_with_utils import BatchClientWithUtils  # noqa: E402
++            from .batch.client import BatchClient  # noqa: E402
+
+-            self._batch = BatchClientWithUtils(client_wrapper=self._client_wrapper)
++            self._batch = BatchClient(client_wrapper=self._client_wrapper)
+         return self._batch
+
+-    @property
+-    def stream(self):
+-        if self._stream is None:
+-            from .stream.stream.client import StreamClient  # noqa: E402
+-            self._stream = StreamClient(client_wrapper=self._client_wrapper)
+-        return self._stream
+-
+
+ class AsyncExpressionMeasurementClient:
+     def __init__(self, *, client_wrapper: AsyncClientWrapper):
+         self._raw_client = AsyncRawExpressionMeasurementClient(client_wrapper=client_wrapper)
+         self._client_wrapper = client_wrapper
+-        self._batch: typing.Optional[AsyncBatchClientWithUtils] = None
+-        self._stream: typing.Optional[AsyncStreamClient] = None
++        self._batch: typing.Optional[AsyncBatchClient] = None
+
+     @property
+     def with_raw_response(self) -> AsyncRawExpressionMeasurementClient:
+@@ -67,15 +57,7 @@ class AsyncExpressionMeasurementClient:
+     @property
+     def batch(self):
+         if self._batch is None:
+-            from .batch.client_with_utils import AsyncBatchClientWithUtils  # noqa: E402
++            from .batch.client import AsyncBatchClient  # noqa: E402
+
+-            self._batch = AsyncBatchClientWithUtils(client_wrapper=self._client_wrapper)
++            self._batch = AsyncBatchClient(client_wrapper=self._client_wrapper)
+         return self._batch
+-
+-    @property
+-    def stream(self):
+-        if self._stream is None:
+-            from .stream.stream.client import AsyncStreamClient  # noqa: E402
+-
+-            self._stream = AsyncStreamClient(client_wrapper=self._client_wrapper)
+-        return self._stream
diff --git a/src/hume/expression_measurement/stream/stream/socket_client.py.diff b/src/hume/expression_measurement/stream/stream/socket_client.py.diff
new file mode 100644
index 00000000..ac65dfb4
--- /dev/null
+++ b/src/hume/expression_measurement/stream/stream/socket_client.py.diff
@@ -0,0 +1,170 @@
+diff --git a/src/hume/expression_measurement/stream/stream/socket_client.py b/src/hume/expression_measurement/stream/stream/socket_client.py
+index fcd83929..85935e4e 100644
+--- a/src/hume/expression_measurement/stream/stream/socket_client.py
++++ b/src/hume/expression_measurement/stream/stream/socket_client.py
+@@ -1,18 +1,13 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+-import base64
+ import json
+ import typing
+ from json.decoder import JSONDecodeError
+-from pathlib import Path
+
+ import websockets
+ import websockets.sync.connection as websockets_sync_connection
+-
+-from ....core.api_error import ApiError
+ from ....core.events import EventEmitterMixin, EventType
+ from ....core.pydantic_utilities import parse_obj_as
+-from .types.config import Config
+ from .types.stream_models_endpoint_payload import StreamModelsEndpointPayload
+ from .types.subscribe_event import SubscribeEvent
+
+@@ -83,74 +78,6 @@ class AsyncStreamSocketClient(EventEmitterMixin):
+         """
+         await self._send(data.dict())
+
+-    async def send_facemesh(
+-        self,
+-        landmarks: typing.List[typing.List[typing.List[float]]],
+-        config: typing.Optional[Config] = None,
+-        payload_id: typing.Optional[str] = None,
+-    ) -> StreamSocketClientResponse:
+-        landmarks_str = json.dumps(landmarks)
+-        payload = {
+-            "data": landmarks_str,
+-            "models": config.dict() if config else None,
+-            "raw_text": False,
+-            "payload_id": payload_id,
+-        }
+-        payload = {k: v for k, v in payload.items() if v is not None}
+-        await self._websocket.send(json.dumps(payload))
+-        return await self.recv()
+-
+-    async def send_text(
+-        self,
+-        text: str,
+-        config: typing.Optional[Config] = None,
+-        payload_id: typing.Optional[str] = None,
+-    ) -> StreamSocketClientResponse:
+-        payload = {
+-            "data": text,
+-            "models": config.dict() if config else None,
+-            "raw_text": True,
+-            "payload_id": payload_id,
+-        }
+-        payload = {k: v for k, v in payload.items() if v is not None}
+-        await self._websocket.send(json.dumps(payload))
+-        return await self.recv()
+-
+-    async def send_file(
+-        self,
+-        file_: typing.Union[str, Path],
+-        config: typing.Optional[Config] = None,
+-        payload_id: typing.Optional[str] = None,
+-    ) -> StreamSocketClientResponse:
+-        try:
+-            with open(file_, "rb") as f:
+-                bytes_data = base64.b64encode(f.read()).decode()
+-        except:
+-            if isinstance(file_, Path):
+-                raise ApiError(body=f"Failed to open file: {file_}")
+-            # If you cannot open the file, assume you were passed a b64 string, not a file path
+-            bytes_data = str(file_)
+-
+-        payload = {
+-            "data": bytes_data,
+-            "models": config.dict() if config else None,
+-            "raw_text": False,
+-            "payload_id": payload_id,
+-        }
+-        payload = {k: v for k, v in payload.items() if v is not None}
+-        await self._websocket.send(json.dumps(payload))
+-        return await self.recv()
+-
+-    async def get_job_details(self) -> StreamSocketClientResponse:
+-        payload = {"job_details": True}
+-        await self._websocket.send(json.dumps(payload))
+-        return await self.recv()
+-
+-    async def reset(self) -> StreamSocketClientResponse:
+-        payload = {"reset_stream": True}
+-        await self._websocket.send(json.dumps(payload))
+-        return await self.recv()
+-
+
+ class StreamSocketClient(EventEmitterMixin):
+     def __init__(self, *, websocket: websockets_sync_connection.Connection):
+@@ -210,71 +137,3 @@ class StreamSocketClient(EventEmitterMixin):
+         Send a Pydantic model to the websocket connection.
+         """
+         self._send(data.dict())
+-
+-    def send_facemesh(
+-        self,
+-        landmarks: typing.List[typing.List[typing.List[float]]],
+-        config: typing.Optional[Config] = None,
+-        payload_id: typing.Optional[str] = None,
+-    ) -> StreamSocketClientResponse:
+-        landmarks_str = json.dumps(landmarks)
+-        payload = {
+-            "data": landmarks_str,
+-            "models": config.dict() if config else None,
+-            "raw_text": False,
+-            "payload_id": payload_id,
+-        }
+-        payload = {k: v for k, v in payload.items() if v is not None}
+-        self._websocket.send(json.dumps(payload))
+-        return self.recv()
+-
+-    def send_text(
+-        self,
+-        text: str,
+-        config: typing.Optional[Config] = None,
+-        payload_id: typing.Optional[str] = None,
+-    ) -> StreamSocketClientResponse:
+-        payload = {
+-            "data": text,
+-            "models": config.dict() if config else None,
+-            "raw_text": True,
+-            "payload_id": payload_id,
+-        }
+-        payload = {k: v for k, v in payload.items() if v is not None}
+-        self._websocket.send(json.dumps(payload))
+-        return self.recv()
+-
+-    def send_file(
+-        self,
+-        file_: typing.Union[str, Path],
+-        config: typing.Optional[Config] = None,
+-        payload_id: typing.Optional[str] = None,
+-    ) -> StreamSocketClientResponse:
+-        try:
+-            with open(file_, "rb") as f:
+-                bytes_data = base64.b64encode(f.read()).decode()
+-        except:
+-            if isinstance(file_, Path):
+-                raise ApiError(body=f"Failed to open file: {file_}")
+-            # If you cannot open the file, assume you were passed a b64 string, not a file path
+-            bytes_data = str(file_)
+-
+-        payload = {
+-            "data": bytes_data,
+-            "models": config.dict() if config else None,
+-            "raw_text": False,
+-            "payload_id": payload_id,
+-        }
+-        payload = {k: v for k, v in payload.items() if v is not None}
+-        self._websocket.send(json.dumps(payload))
+-        return self.recv()
+-
+-    def get_job_details(self) -> StreamSocketClientResponse:
+-        payload = {"job_details": True}
+-        self._websocket.send(json.dumps(payload))
+-        return self.recv()
+-
+-    def reset(self) -> StreamSocketClientResponse:
+-        payload = {"reset_stream": True}
+-        self._websocket.send(json.dumps(payload))
+-        return self.recv()
diff --git a/src/hume/tts/client.py.diff b/src/hume/tts/client.py.diff
new file mode 100644
index 00000000..f560cc99
--- /dev/null
+++ b/src/hume/tts/client.py.diff
@@ -0,0 +1 @@
+# No differences - file is identical in both versions