diff --git a/.fern/metadata.json b/.fern/metadata.json index 5e217e71..5dea0024 100644 --- a/.fern/metadata.json +++ b/.fern/metadata.json @@ -1,10 +1,11 @@ { "cliVersion": "3.4.3", "generatorName": "fernapi/fern-python-sdk", - "generatorVersion": "4.38.0", + "generatorVersion": "4.46.6", "generatorConfig": { "should_generate_websocket_clients": true, "pyproject_python_version": ">=3.9,<4", + "enable_wire_tests": true, "client": { "class_name": "BaseHumeClient", "filename": "base_client.py", @@ -62,5 +63,6 @@ ] } ] - } + }, + "sdkVersion": "0.13.6" } \ No newline at end of file diff --git a/.fernignore b/.fernignore index 3000115f..6fd765b0 100644 --- a/.fernignore +++ b/.fernignore @@ -19,9 +19,6 @@ src/hume/client.py src/hume/empathic_voice/chat/client.py src/hume/empathic_voice/chat/raw_client.py -# Needs to add .chat to reference chat_client -src/hume/empathic_voice/client.py - # Needs to add .stream to reference expression measurement stream client # Also wrap the .batch client in BatchClientWithUtils src/hume/expression_measurement/client.py diff --git a/poetry.lock b/poetry.lock index 3d91a011..7d8603a9 100644 --- a/poetry.lock +++ b/poetry.lock @@ -24,22 +24,23 @@ files = [ [[package]] name = "anyio" -version = "4.12.0" +version = "4.11.0" description = "High-level concurrency and networking framework on top of asyncio or Trio" optional = false python-versions = ">=3.9" files = [ - {file = "anyio-4.12.0-py3-none-any.whl", hash = "sha256:dad2376a628f98eeca4881fc56cd06affd18f659b17a747d3ff0307ced94b1bb"}, - {file = "anyio-4.12.0.tar.gz", hash = "sha256:73c693b567b0c55130c104d0b43a9baf3aa6a31fc6110116509f27bf75e21ec0"}, + {file = "anyio-4.11.0-py3-none-any.whl", hash = "sha256:0287e96f4d26d4149305414d4e3bc32f0dcd0862365a4bddea19d7a1ec38c4fc"}, + {file = "anyio-4.11.0.tar.gz", hash = "sha256:82a8d0b81e318cc5ce71a5f1f8b5c4e63619620b63141ef8c995fa0db95a57c4"}, ] [package.dependencies] exceptiongroup = {version = ">=1.0.2", markers = "python_version < \"3.11\""} idna = ">=2.8" +sniffio = ">=1.1" typing_extensions = {version = ">=4.5", markers = "python_version < \"3.13\""} [package.extras] -trio = ["trio (>=0.31.0)", "trio (>=0.32.0)"] +trio = ["trio (>=0.31.0)"] [[package]] name = "appnope" @@ -201,17 +202,17 @@ dev = ["backports.zoneinfo", "freezegun (>=1.0,<2.0)", "jinja2 (>=3.0)", "pytest [[package]] name = "beautifulsoup4" -version = "4.14.3" +version = "4.14.2" description = "Screen-scraping library" optional = true python-versions = ">=3.7.0" files = [ - {file = "beautifulsoup4-4.14.3-py3-none-any.whl", hash = "sha256:0918bfe44902e6ad8d57732ba310582e98da931428d231a5ecb9e7c703a735bb"}, - {file = "beautifulsoup4-4.14.3.tar.gz", hash = "sha256:6292b1c5186d356bba669ef9f7f051757099565ad9ada5dd630bd9de5fa7fb86"}, + {file = "beautifulsoup4-4.14.2-py3-none-any.whl", hash = "sha256:5ef6fa3a8cbece8488d66985560f97ed091e22bbc4e9c2338508a9d5de6d4515"}, + {file = "beautifulsoup4-4.14.2.tar.gz", hash = "sha256:2a98ab9f944a11acee9cc848508ec28d9228abfd522ef0fad6a02a72e0ded69e"}, ] [package.dependencies] -soupsieve = ">=1.6.1" +soupsieve = ">1.2" typing-extensions = ">=4.0.0" [package.extras] @@ -241,13 +242,13 @@ css = ["tinycss2 (>=1.1.0,<1.5)"] [[package]] name = "certifi" -version = "2026.1.4" +version = "2025.11.12" description = "Python package for providing Mozilla's CA Bundle." 
optional = false python-versions = ">=3.7" files = [ - {file = "certifi-2026.1.4-py3-none-any.whl", hash = "sha256:9943707519e4add1115f44c2bc244f782c0249876bf51b6599fee1ffbedd685c"}, - {file = "certifi-2026.1.4.tar.gz", hash = "sha256:ac726dd470482006e014ad384921ed6438c457018f4b3d204aea4281258b2120"}, + {file = "certifi-2025.11.12-py3-none-any.whl", hash = "sha256:97de8790030bbd5c2d96b7ec782fc2f7820ef8dba6db909ccf95449f2d062d4b"}, + {file = "certifi-2025.11.12.tar.gz", hash = "sha256:d8ab5478f2ecd78af242878415affce761ca6bc54a22a27e026d7c25357c3316"}, ] [[package]] @@ -350,7 +351,7 @@ pycparser = {version = "*", markers = "implementation_name != \"PyPy\""} name = "charset-normalizer" version = "3.4.4" description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." -optional = true +optional = false python-versions = ">=3.7" files = [ {file = "charset_normalizer-3.4.4-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:e824f1492727fa856dd6eda4f7cee25f8518a12f3c4a56a74e8095695089cf6d"}, @@ -631,41 +632,41 @@ toml = ["tomli"] [[package]] name = "debugpy" -version = "1.8.19" +version = "1.8.17" description = "An implementation of the Debug Adapter Protocol for Python" optional = true python-versions = ">=3.8" files = [ - {file = "debugpy-1.8.19-cp310-cp310-macosx_15_0_x86_64.whl", hash = "sha256:fce6da15d73be5935b4438435c53adb512326a3e11e4f90793ea87cd9f018254"}, - {file = "debugpy-1.8.19-cp310-cp310-manylinux_2_34_x86_64.whl", hash = "sha256:e24b1652a1df1ab04d81e7ead446a91c226de704ff5dde6bd0a0dbaab07aa3f2"}, - {file = "debugpy-1.8.19-cp310-cp310-win32.whl", hash = "sha256:327cb28c3ad9e17bc925efc7f7018195fd4787c2fe4b7af1eec11f1d19bdec62"}, - {file = "debugpy-1.8.19-cp310-cp310-win_amd64.whl", hash = "sha256:b7dd275cf2c99e53adb9654f5ae015f70415bbe2bacbe24cfee30d54b6aa03c5"}, - {file = "debugpy-1.8.19-cp311-cp311-macosx_15_0_universal2.whl", hash = "sha256:c5dcfa21de1f735a4f7ced4556339a109aa0f618d366ede9da0a3600f2516d8b"}, - {file = "debugpy-1.8.19-cp311-cp311-manylinux_2_34_x86_64.whl", hash = "sha256:806d6800246244004625d5222d7765874ab2d22f3ba5f615416cf1342d61c488"}, - {file = "debugpy-1.8.19-cp311-cp311-win32.whl", hash = "sha256:783a519e6dfb1f3cd773a9bda592f4887a65040cb0c7bd38dde410f4e53c40d4"}, - {file = "debugpy-1.8.19-cp311-cp311-win_amd64.whl", hash = "sha256:14035cbdbb1fe4b642babcdcb5935c2da3b1067ac211c5c5a8fdc0bb31adbcaa"}, - {file = "debugpy-1.8.19-cp312-cp312-macosx_15_0_universal2.whl", hash = "sha256:bccb1540a49cde77edc7ce7d9d075c1dbeb2414751bc0048c7a11e1b597a4c2e"}, - {file = "debugpy-1.8.19-cp312-cp312-manylinux_2_34_x86_64.whl", hash = "sha256:e9c68d9a382ec754dc05ed1d1b4ed5bd824b9f7c1a8cd1083adb84b3c93501de"}, - {file = "debugpy-1.8.19-cp312-cp312-win32.whl", hash = "sha256:6599cab8a783d1496ae9984c52cb13b7c4a3bd06a8e6c33446832a5d97ce0bee"}, - {file = "debugpy-1.8.19-cp312-cp312-win_amd64.whl", hash = "sha256:66e3d2fd8f2035a8f111eb127fa508469dfa40928a89b460b41fd988684dc83d"}, - {file = "debugpy-1.8.19-cp313-cp313-macosx_15_0_universal2.whl", hash = "sha256:91e35db2672a0abaf325f4868fcac9c1674a0d9ad9bb8a8c849c03a5ebba3e6d"}, - {file = "debugpy-1.8.19-cp313-cp313-manylinux_2_34_x86_64.whl", hash = "sha256:85016a73ab84dea1c1f1dcd88ec692993bcbe4532d1b49ecb5f3c688ae50c606"}, - {file = "debugpy-1.8.19-cp313-cp313-win32.whl", hash = "sha256:b605f17e89ba0ecee994391194285fada89cee111cfcd29d6f2ee11cbdc40976"}, - {file = "debugpy-1.8.19-cp313-cp313-win_amd64.whl", hash = 
"sha256:c30639998a9f9cd9699b4b621942c0179a6527f083c72351f95c6ab1728d5b73"}, - {file = "debugpy-1.8.19-cp314-cp314-macosx_15_0_universal2.whl", hash = "sha256:1e8c4d1bd230067bf1bbcdbd6032e5a57068638eb28b9153d008ecde288152af"}, - {file = "debugpy-1.8.19-cp314-cp314-manylinux_2_34_x86_64.whl", hash = "sha256:d40c016c1f538dbf1762936e3aeb43a89b965069d9f60f9e39d35d9d25e6b809"}, - {file = "debugpy-1.8.19-cp314-cp314-win32.whl", hash = "sha256:0601708223fe1cd0e27c6cce67a899d92c7d68e73690211e6788a4b0e1903f5b"}, - {file = "debugpy-1.8.19-cp314-cp314-win_amd64.whl", hash = "sha256:8e19a725f5d486f20e53a1dde2ab8bb2c9607c40c00a42ab646def962b41125f"}, - {file = "debugpy-1.8.19-cp38-cp38-macosx_15_0_x86_64.whl", hash = "sha256:d9b6f633fd2865af2afba2beb0c1819b6ecd4aed1c8f90f5d1bbca3272306b10"}, - {file = "debugpy-1.8.19-cp38-cp38-manylinux_2_34_x86_64.whl", hash = "sha256:a21bfdea088f713df05fa246ba0520f6ba44dd7eaec224742f51987a6979a648"}, - {file = "debugpy-1.8.19-cp38-cp38-win32.whl", hash = "sha256:b1cb98e5325da3059ca24445fca48314bfddfdf65ce1b59ff07055e723f06bd2"}, - {file = "debugpy-1.8.19-cp38-cp38-win_amd64.whl", hash = "sha256:c9b9bf440141a36836bdbe4320a2b126bb38aafa85e1aed05d7bfbb0e2a278bf"}, - {file = "debugpy-1.8.19-cp39-cp39-macosx_15_0_x86_64.whl", hash = "sha256:c047177ab2d286451f242b855b650d313198c4a987140d4b35218b2855a64a4a"}, - {file = "debugpy-1.8.19-cp39-cp39-manylinux_2_34_x86_64.whl", hash = "sha256:4468de0c30012d367944f0eab4ecb8371736e8ef9522a465f61214f344c11183"}, - {file = "debugpy-1.8.19-cp39-cp39-win32.whl", hash = "sha256:7b62c0f015120ede25e5124a5f9d8a424e1208e3d96a36c89958f046ee21fff6"}, - {file = "debugpy-1.8.19-cp39-cp39-win_amd64.whl", hash = "sha256:76f566baaf7f3e06adbe67ffedccd2ee911d1e486f55931939ce3f0fe1090774"}, - {file = "debugpy-1.8.19-py2.py3-none-any.whl", hash = "sha256:360ffd231a780abbc414ba0f005dad409e71c78637efe8f2bd75837132a41d38"}, - {file = "debugpy-1.8.19.tar.gz", hash = "sha256:eea7e5987445ab0b5ed258093722d5ecb8bb72217c5c9b1e21f64efe23ddebdb"}, + {file = "debugpy-1.8.17-cp310-cp310-macosx_15_0_x86_64.whl", hash = "sha256:c41d2ce8bbaddcc0009cc73f65318eedfa3dbc88a8298081deb05389f1ab5542"}, + {file = "debugpy-1.8.17-cp310-cp310-manylinux_2_34_x86_64.whl", hash = "sha256:1440fd514e1b815edd5861ca394786f90eb24960eb26d6f7200994333b1d79e3"}, + {file = "debugpy-1.8.17-cp310-cp310-win32.whl", hash = "sha256:3a32c0af575749083d7492dc79f6ab69f21b2d2ad4cd977a958a07d5865316e4"}, + {file = "debugpy-1.8.17-cp310-cp310-win_amd64.whl", hash = "sha256:a3aad0537cf4d9c1996434be68c6c9a6d233ac6f76c2a482c7803295b4e4f99a"}, + {file = "debugpy-1.8.17-cp311-cp311-macosx_15_0_universal2.whl", hash = "sha256:d3fce3f0e3de262a3b67e69916d001f3e767661c6e1ee42553009d445d1cd840"}, + {file = "debugpy-1.8.17-cp311-cp311-manylinux_2_34_x86_64.whl", hash = "sha256:c6bdf134457ae0cac6fb68205776be635d31174eeac9541e1d0c062165c6461f"}, + {file = "debugpy-1.8.17-cp311-cp311-win32.whl", hash = "sha256:e79a195f9e059edfe5d8bf6f3749b2599452d3e9380484cd261f6b7cd2c7c4da"}, + {file = "debugpy-1.8.17-cp311-cp311-win_amd64.whl", hash = "sha256:b532282ad4eca958b1b2d7dbcb2b7218e02cb934165859b918e3b6ba7772d3f4"}, + {file = "debugpy-1.8.17-cp312-cp312-macosx_15_0_universal2.whl", hash = "sha256:f14467edef672195c6f6b8e27ce5005313cb5d03c9239059bc7182b60c176e2d"}, + {file = "debugpy-1.8.17-cp312-cp312-manylinux_2_34_x86_64.whl", hash = "sha256:24693179ef9dfa20dca8605905a42b392be56d410c333af82f1c5dff807a64cc"}, + {file = "debugpy-1.8.17-cp312-cp312-win32.whl", hash = 
"sha256:6a4e9dacf2cbb60d2514ff7b04b4534b0139facbf2abdffe0639ddb6088e59cf"}, + {file = "debugpy-1.8.17-cp312-cp312-win_amd64.whl", hash = "sha256:e8f8f61c518952fb15f74a302e068b48d9c4691768ade433e4adeea961993464"}, + {file = "debugpy-1.8.17-cp313-cp313-macosx_15_0_universal2.whl", hash = "sha256:857c1dd5d70042502aef1c6d1c2801211f3ea7e56f75e9c335f434afb403e464"}, + {file = "debugpy-1.8.17-cp313-cp313-manylinux_2_34_x86_64.whl", hash = "sha256:3bea3b0b12f3946e098cce9b43c3c46e317b567f79570c3f43f0b96d00788088"}, + {file = "debugpy-1.8.17-cp313-cp313-win32.whl", hash = "sha256:e34ee844c2f17b18556b5bbe59e1e2ff4e86a00282d2a46edab73fd7f18f4a83"}, + {file = "debugpy-1.8.17-cp313-cp313-win_amd64.whl", hash = "sha256:6c5cd6f009ad4fca8e33e5238210dc1e5f42db07d4b6ab21ac7ffa904a196420"}, + {file = "debugpy-1.8.17-cp314-cp314-macosx_15_0_universal2.whl", hash = "sha256:045290c010bcd2d82bc97aa2daf6837443cd52f6328592698809b4549babcee1"}, + {file = "debugpy-1.8.17-cp314-cp314-manylinux_2_34_x86_64.whl", hash = "sha256:b69b6bd9dba6a03632534cdf67c760625760a215ae289f7489a452af1031fe1f"}, + {file = "debugpy-1.8.17-cp314-cp314-win32.whl", hash = "sha256:5c59b74aa5630f3a5194467100c3b3d1c77898f9ab27e3f7dc5d40fc2f122670"}, + {file = "debugpy-1.8.17-cp314-cp314-win_amd64.whl", hash = "sha256:893cba7bb0f55161de4365584b025f7064e1f88913551bcd23be3260b231429c"}, + {file = "debugpy-1.8.17-cp38-cp38-macosx_15_0_x86_64.whl", hash = "sha256:8deb4e31cd575c9f9370042876e078ca118117c1b5e1f22c32befcfbb6955f0c"}, + {file = "debugpy-1.8.17-cp38-cp38-manylinux_2_34_x86_64.whl", hash = "sha256:b75868b675949a96ab51abc114c7163f40ff0d8f7d6d5fd63f8932fd38e9c6d7"}, + {file = "debugpy-1.8.17-cp38-cp38-win32.whl", hash = "sha256:17e456da14848d618662354e1dccfd5e5fb75deec3d1d48dc0aa0baacda55860"}, + {file = "debugpy-1.8.17-cp38-cp38-win_amd64.whl", hash = "sha256:e851beb536a427b5df8aa7d0c7835b29a13812f41e46292ff80b2ef77327355a"}, + {file = "debugpy-1.8.17-cp39-cp39-macosx_15_0_x86_64.whl", hash = "sha256:f2ac8055a0c4a09b30b931100996ba49ef334c6947e7ae365cdd870416d7513e"}, + {file = "debugpy-1.8.17-cp39-cp39-manylinux_2_34_x86_64.whl", hash = "sha256:eaa85bce251feca8e4c87ce3b954aba84b8c645b90f0e6a515c00394a9f5c0e7"}, + {file = "debugpy-1.8.17-cp39-cp39-win32.whl", hash = "sha256:b13eea5587e44f27f6c48588b5ad56dcb74a4f3a5f89250443c94587f3eb2ea1"}, + {file = "debugpy-1.8.17-cp39-cp39-win_amd64.whl", hash = "sha256:bb1bbf92317e1f35afcf3ef0450219efb3afe00be79d8664b250ac0933b9015f"}, + {file = "debugpy-1.8.17-py2.py3-none-any.whl", hash = "sha256:60c7dca6571efe660ccb7a9508d73ca14b8796c4ed484c2002abba714226cfef"}, + {file = "debugpy-1.8.17.tar.gz", hash = "sha256:fd723b47a8c08892b1a16b2c6239a8b96637c62a59b94bb5dab4bac592a58a8e"}, ] [[package]] @@ -721,13 +722,13 @@ tests = ["pytest"] [[package]] name = "exceptiongroup" -version = "1.3.1" +version = "1.3.0" description = "Backport of PEP 654 (exception groups)" optional = false python-versions = ">=3.7" files = [ - {file = "exceptiongroup-1.3.1-py3-none-any.whl", hash = "sha256:a7a39a3bd276781e98394987d3a5701d0c4edffb633bb7a5144577f82c773598"}, - {file = "exceptiongroup-1.3.1.tar.gz", hash = "sha256:8b412432c6055b0b7d14c310000ae93352ed6754f70fa8f7c34141f91c4e3219"}, + {file = "exceptiongroup-1.3.0-py3-none-any.whl", hash = "sha256:4d111e6e0c13d0644cad6ddaa7ed0261a0b36971f6d23e7ec9b4b9097da78a10"}, + {file = "exceptiongroup-1.3.0.tar.gz", hash = "sha256:b241f5885f560bc56a59ee63ca4c6a8bfa46ae4ad651af316d4e81817bb9fd88"}, ] [package.dependencies] @@ -861,13 +862,13 @@ all = ["flake8 (>=7.1.1)", "mypy 
(>=1.11.2)", "pytest (>=8.3.2)", "ruff (>=0.6.2 [[package]] name = "importlib-metadata" -version = "8.7.1" +version = "8.7.0" description = "Read metadata from Python packages" optional = false python-versions = ">=3.9" files = [ - {file = "importlib_metadata-8.7.1-py3-none-any.whl", hash = "sha256:5a1f80bf1daa489495071efbb095d75a634cf28a8bc299581244063b53176151"}, - {file = "importlib_metadata-8.7.1.tar.gz", hash = "sha256:49fef1ae6440c182052f407c8d34a68f72efc36db9ca90dc0113398f2fdde8bb"}, + {file = "importlib_metadata-8.7.0-py3-none-any.whl", hash = "sha256:e5dd1551894c77868a30651cef00984d50e1002d06942a7101d34870c5f02afd"}, + {file = "importlib_metadata-8.7.0.tar.gz", hash = "sha256:d13b81ad223b890aa16c5471f2ac3056cf76c5f10f82d6f9292f0b415f389000"}, ] [package.dependencies] @@ -877,10 +878,10 @@ zipp = ">=3.20" check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1)"] cover = ["pytest-cov"] doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] -enabler = ["pytest-enabler (>=3.4)"] +enabler = ["pytest-enabler (>=2.2)"] perf = ["ipython"] -test = ["flufl.flake8", "jaraco.test (>=5.4)", "packaging", "pyfakefs", "pytest (>=6,!=8.1.*)", "pytest-perf (>=0.9.2)"] -type = ["mypy (<1.19)", "pytest-mypy (>=1.0.1)"] +test = ["flufl.flake8", "importlib_resources (>=1.3)", "jaraco.test (>=5.4)", "packaging", "pyfakefs", "pytest (>=6,!=8.1.*)", "pytest-perf (>=0.9.2)"] +type = ["pytest-mypy"] [[package]] name = "iniconfig" @@ -1050,15 +1051,18 @@ i18n = ["Babel (>=2.7)"] [[package]] name = "json5" -version = "0.13.0" +version = "0.12.1" description = "A Python implementation of the JSON5 data format." optional = true python-versions = ">=3.8.0" files = [ - {file = "json5-0.13.0-py3-none-any.whl", hash = "sha256:9a08e1dd65f6a4d4c6fa82d216cf2477349ec2346a38fd70cc11d2557499fbcc"}, - {file = "json5-0.13.0.tar.gz", hash = "sha256:b1edf8d487721c0bf64d83c28e91280781f6e21f4a797d3261c7c828d4c165bf"}, + {file = "json5-0.12.1-py3-none-any.whl", hash = "sha256:d9c9b3bc34a5f54d43c35e11ef7cb87d8bdd098c6ace87117a7b7e83e705c1d5"}, + {file = "json5-0.12.1.tar.gz", hash = "sha256:b2743e77b3242f8d03c143dd975a6ec7c52e2f2afe76ed934e53503dd4ad4990"}, ] +[package.extras] +dev = ["build (==1.2.2.post1)", "coverage (==7.5.4)", "coverage (==7.8.0)", "mypy (==1.14.1)", "mypy (==1.15.0)", "pip (==25.0.1)", "pylint (==3.2.7)", "pylint (==3.3.6)", "ruff (==0.11.2)", "twine (==6.1.0)", "uv (==0.6.11)"] + [[package]] name = "jsonpointer" version = "3.0.0" @@ -1298,13 +1302,13 @@ test = ["jupyter-server (>=2.0.0)", "pytest (>=7.0)", "pytest-jupyter[server] (> [[package]] name = "jupyterlab" -version = "4.5.1" +version = "4.5.0" description = "JupyterLab computational environment" optional = true python-versions = ">=3.9" files = [ - {file = "jupyterlab-4.5.1-py3-none-any.whl", hash = "sha256:31b059de96de0754ff1f2ce6279774b6aab8c34d7082e9752db58207c99bd514"}, - {file = "jupyterlab-4.5.1.tar.gz", hash = "sha256:09da1ddfbd9eec18b5101dbb8515612aa1e47443321fb99503725a88e93d20d9"}, + {file = "jupyterlab-4.5.0-py3-none-any.whl", hash = "sha256:88e157c75c1afff64c7dc4b801ec471450b922a4eae4305211ddd40da8201c8a"}, + {file = "jupyterlab-4.5.0.tar.gz", hash = "sha256:aec33d6d8f1225b495ee2cf20f0514f45e6df8e360bdd7ac9bace0b7ac5177ea"}, ] [package.dependencies] @@ -1577,13 +1581,13 @@ files = [ [[package]] name = "mistune" -version = "3.2.0" +version = "3.1.4" description = "A sane and fast Markdown parser with useful plugins and renderers" optional = true python-versions = 
">=3.8" files = [ - {file = "mistune-3.2.0-py3-none-any.whl", hash = "sha256:febdc629a3c78616b94393c6580551e0e34cc289987ec6c35ed3f4be42d0eee1"}, - {file = "mistune-3.2.0.tar.gz", hash = "sha256:708487c8a8cdd99c9d90eb3ed4c3ed961246ff78ac82f03418f5183ab70e398a"}, + {file = "mistune-3.1.4-py3-none-any.whl", hash = "sha256:93691da911e5d9d2e23bc54472892aff676df27a75274962ff9edc210364266d"}, + {file = "mistune-3.1.4.tar.gz", hash = "sha256:b5a7f801d389f724ec702840c11d8fc48f2b33519102fc7ee739e8177b672164"}, ] [package.dependencies] @@ -1746,18 +1750,18 @@ files = [ [[package]] name = "notebook" -version = "7.5.1" +version = "7.5.0" description = "Jupyter Notebook - A web-based notebook environment for interactive computing" optional = true python-versions = ">=3.9" files = [ - {file = "notebook-7.5.1-py3-none-any.whl", hash = "sha256:f4e2451c19910c33b88709b84537e11f6368c1cdff1aa0c43db701aea535dd44"}, - {file = "notebook-7.5.1.tar.gz", hash = "sha256:b2fb4cef4d47d08c33aecce1c6c6e84be05436fbd791f88fce8df9fbca088b75"}, + {file = "notebook-7.5.0-py3-none-any.whl", hash = "sha256:3300262d52905ca271bd50b22617681d95f08a8360d099e097726e6d2efb5811"}, + {file = "notebook-7.5.0.tar.gz", hash = "sha256:3b27eaf9913033c28dde92d02139414c608992e1df4b969c843219acf2ff95e4"}, ] [package.dependencies] jupyter-server = ">=2.4.0,<3" -jupyterlab = ">=4.5.1,<4.6" +jupyterlab = ">=4.5.0rc0,<4.6" jupyterlab-server = ">=2.28.0,<3" notebook-shim = ">=0.2,<0.3" tornado = ">=6.2.0" @@ -1907,37 +1911,35 @@ wcwidth = "*" [[package]] name = "psutil" -version = "7.2.1" +version = "7.1.3" description = "Cross-platform lib for process and system monitoring." optional = true python-versions = ">=3.6" files = [ - {file = "psutil-7.2.1-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:ba9f33bb525b14c3ea563b2fd521a84d2fa214ec59e3e6a2858f78d0844dd60d"}, - {file = "psutil-7.2.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:81442dac7abfc2f4f4385ea9e12ddf5a796721c0f6133260687fec5c3780fa49"}, - {file = "psutil-7.2.1-cp313-cp313t-manylinux2010_x86_64.manylinux_2_12_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ea46c0d060491051d39f0d2cff4f98d5c72b288289f57a21556cc7d504db37fc"}, - {file = "psutil-7.2.1-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:35630d5af80d5d0d49cfc4d64c1c13838baf6717a13effb35869a5919b854cdf"}, - {file = "psutil-7.2.1-cp313-cp313t-win_amd64.whl", hash = "sha256:923f8653416604e356073e6e0bccbe7c09990acef442def2f5640dd0faa9689f"}, - {file = "psutil-7.2.1-cp313-cp313t-win_arm64.whl", hash = "sha256:cfbe6b40ca48019a51827f20d830887b3107a74a79b01ceb8cc8de4ccb17b672"}, - {file = "psutil-7.2.1-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:494c513ccc53225ae23eec7fe6e1482f1b8a44674241b54561f755a898650679"}, - {file = "psutil-7.2.1-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:3fce5f92c22b00cdefd1645aa58ab4877a01679e901555067b1bd77039aa589f"}, - {file = "psutil-7.2.1-cp314-cp314t-manylinux2010_x86_64.manylinux_2_12_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:93f3f7b0bb07711b49626e7940d6fe52aa9940ad86e8f7e74842e73189712129"}, - {file = "psutil-7.2.1-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d34d2ca888208eea2b5c68186841336a7f5e0b990edec929be909353a202768a"}, - {file = "psutil-7.2.1-cp314-cp314t-win_amd64.whl", hash = "sha256:2ceae842a78d1603753561132d5ad1b2f8a7979cb0c283f5b52fb4e6e14b1a79"}, - {file = "psutil-7.2.1-cp314-cp314t-win_arm64.whl", hash = 
"sha256:08a2f175e48a898c8eb8eace45ce01777f4785bc744c90aa2cc7f2fa5462a266"}, - {file = "psutil-7.2.1-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:b2e953fcfaedcfbc952b44744f22d16575d3aa78eb4f51ae74165b4e96e55f42"}, - {file = "psutil-7.2.1-cp36-abi3-macosx_11_0_arm64.whl", hash = "sha256:05cc68dbb8c174828624062e73078e7e35406f4ca2d0866c272c2410d8ef06d1"}, - {file = "psutil-7.2.1-cp36-abi3-manylinux2010_x86_64.manylinux_2_12_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:5e38404ca2bb30ed7267a46c02f06ff842e92da3bb8c5bfdadbd35a5722314d8"}, - {file = "psutil-7.2.1-cp36-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ab2b98c9fc19f13f59628d94df5cc4cc4844bc572467d113a8b517d634e362c6"}, - {file = "psutil-7.2.1-cp36-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:f78baafb38436d5a128f837fab2d92c276dfb48af01a240b861ae02b2413ada8"}, - {file = "psutil-7.2.1-cp36-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:99a4cd17a5fdd1f3d014396502daa70b5ec21bf4ffe38393e152f8e449757d67"}, - {file = "psutil-7.2.1-cp37-abi3-win_amd64.whl", hash = "sha256:b1b0671619343aa71c20ff9767eced0483e4fc9e1f489d50923738caf6a03c17"}, - {file = "psutil-7.2.1-cp37-abi3-win_arm64.whl", hash = "sha256:0d67c1822c355aa6f7314d92018fb4268a76668a536f133599b91edd48759442"}, - {file = "psutil-7.2.1.tar.gz", hash = "sha256:f7583aec590485b43ca601dd9cea0dcd65bd7bb21d30ef4ddbf4ea6b5ed1bdd3"}, -] - -[package.extras] -dev = ["abi3audit", "black", "check-manifest", "coverage", "packaging", "psleak", "pylint", "pyperf", "pypinfo", "pytest", "pytest-cov", "pytest-instafail", "pytest-xdist", "requests", "rstcheck", "ruff", "setuptools", "sphinx", "sphinx_rtd_theme", "toml-sort", "twine", "validate-pyproject[all]", "virtualenv", "vulture", "wheel"] -test = ["psleak", "pytest", "pytest-instafail", "pytest-xdist", "setuptools"] + {file = "psutil-7.1.3-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:0005da714eee687b4b8decd3d6cc7c6db36215c9e74e5ad2264b90c3df7d92dc"}, + {file = "psutil-7.1.3-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:19644c85dcb987e35eeeaefdc3915d059dac7bd1167cdcdbf27e0ce2df0c08c0"}, + {file = "psutil-7.1.3-cp313-cp313t-manylinux2010_x86_64.manylinux_2_12_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:95ef04cf2e5ba0ab9eaafc4a11eaae91b44f4ef5541acd2ee91d9108d00d59a7"}, + {file = "psutil-7.1.3-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1068c303be3a72f8e18e412c5b2a8f6d31750fb152f9cb106b54090296c9d251"}, + {file = "psutil-7.1.3-cp313-cp313t-win_amd64.whl", hash = "sha256:18349c5c24b06ac5612c0428ec2a0331c26443d259e2a0144a9b24b4395b58fa"}, + {file = "psutil-7.1.3-cp313-cp313t-win_arm64.whl", hash = "sha256:c525ffa774fe4496282fb0b1187725793de3e7c6b29e41562733cae9ada151ee"}, + {file = "psutil-7.1.3-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:b403da1df4d6d43973dc004d19cee3b848e998ae3154cc8097d139b77156c353"}, + {file = "psutil-7.1.3-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:ad81425efc5e75da3f39b3e636293360ad8d0b49bed7df824c79764fb4ba9b8b"}, + {file = "psutil-7.1.3-cp314-cp314t-manylinux2010_x86_64.manylinux_2_12_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8f33a3702e167783a9213db10ad29650ebf383946e91bc77f28a5eb083496bc9"}, + {file = "psutil-7.1.3-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:fac9cd332c67f4422504297889da5ab7e05fd11e3c4392140f7370f4208ded1f"}, + {file = "psutil-7.1.3-cp314-cp314t-win_amd64.whl", hash = 
"sha256:3792983e23b69843aea49c8f5b8f115572c5ab64c153bada5270086a2123c7e7"}, + {file = "psutil-7.1.3-cp314-cp314t-win_arm64.whl", hash = "sha256:31d77fcedb7529f27bb3a0472bea9334349f9a04160e8e6e5020f22c59893264"}, + {file = "psutil-7.1.3-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:2bdbcd0e58ca14996a42adf3621a6244f1bb2e2e528886959c72cf1e326677ab"}, + {file = "psutil-7.1.3-cp36-abi3-macosx_11_0_arm64.whl", hash = "sha256:bc31fa00f1fbc3c3802141eede66f3a2d51d89716a194bf2cd6fc68310a19880"}, + {file = "psutil-7.1.3-cp36-abi3-manylinux2010_x86_64.manylinux_2_12_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:3bb428f9f05c1225a558f53e30ccbad9930b11c3fc206836242de1091d3e7dd3"}, + {file = "psutil-7.1.3-cp36-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:56d974e02ca2c8eb4812c3f76c30e28836fffc311d55d979f1465c1feeb2b68b"}, + {file = "psutil-7.1.3-cp37-abi3-win_amd64.whl", hash = "sha256:f39c2c19fe824b47484b96f9692932248a54c43799a84282cfe58d05a6449efd"}, + {file = "psutil-7.1.3-cp37-abi3-win_arm64.whl", hash = "sha256:bd0d69cee829226a761e92f28140bec9a5ee9d5b4fb4b0cc589068dbfff559b1"}, + {file = "psutil-7.1.3.tar.gz", hash = "sha256:6c86281738d77335af7aec228328e944b30930899ea760ecf33a4dba66be5e74"}, +] + +[package.extras] +dev = ["abi3audit", "black", "check-manifest", "colorama", "coverage", "packaging", "pylint", "pyperf", "pypinfo", "pyreadline", "pytest", "pytest-cov", "pytest-instafail", "pytest-subtests", "pytest-xdist", "pywin32", "requests", "rstcheck", "ruff", "setuptools", "sphinx", "sphinx_rtd_theme", "toml-sort", "twine", "validate-pyproject[all]", "virtualenv", "vulture", "wheel", "wheel", "wmi"] +test = ["pytest", "pytest-instafail", "pytest-subtests", "pytest-xdist", "pywin32", "setuptools", "wheel", "wmi"] [[package]] name = "ptyprocess" @@ -1977,13 +1979,13 @@ files = [ [[package]] name = "pydantic" -version = "2.12.5" +version = "2.12.4" description = "Data validation using Python type hints" optional = false python-versions = ">=3.9" files = [ - {file = "pydantic-2.12.5-py3-none-any.whl", hash = "sha256:e561593fccf61e8a20fc46dfc2dfe075b8be7d0188df33f221ad1f0139180f9d"}, - {file = "pydantic-2.12.5.tar.gz", hash = "sha256:4d351024c75c0f085a9febbb665ce8c0c6ec5d30e903bdb6394b7ede26aebb49"}, + {file = "pydantic-2.12.4-py3-none-any.whl", hash = "sha256:92d3d202a745d46f9be6df459ac5a064fdaa3c1c4cd8adcfa332ccf3c05f871e"}, + {file = "pydantic-2.12.4.tar.gz", hash = "sha256:0f8cb9555000a4b5b617f66bfd2566264c4984b27589d3b845685983e8ea85ac"}, ] [package.dependencies] @@ -2565,7 +2567,7 @@ typing-extensions = {version = ">=4.4.0", markers = "python_version < \"3.13\""} name = "requests" version = "2.32.5" description = "Python HTTP for Humans." 
-optional = true +optional = false python-versions = ">=3.9" files = [ {file = "requests-2.32.5-py3-none-any.whl", hash = "sha256:2462f94637a34fd532264295e186976db0f5d453d1cdd31473c85a6a161affb6"}, @@ -2828,13 +2830,13 @@ files = [ [[package]] name = "send2trash" -version = "2.0.0" +version = "1.8.3" description = "Send file to trash natively under Mac OS X, Windows and Linux" optional = true -python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,!=3.7.*" +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7" files = [ - {file = "send2trash-2.0.0-py3-none-any.whl", hash = "sha256:e70d5ce41dbb890882cc78bc25d137478330b39a391e756fadf82e34da4d85b8"}, - {file = "send2trash-2.0.0.tar.gz", hash = "sha256:1761421da3f9930bfe51ed7c45343948573383ad4c27e3acebc91be324e7770d"}, + {file = "Send2Trash-1.8.3-py3-none-any.whl", hash = "sha256:0c31227e0bd08961c7665474a3d1ef7193929fedda4233843689baa056be46c9"}, + {file = "Send2Trash-1.8.3.tar.gz", hash = "sha256:b18e7a3966d99871aefeb00cfbcfdced55ce4871194810fc71f4aa484b953abf"}, ] [package.extras] @@ -2873,6 +2875,17 @@ files = [ {file = "six-1.17.0.tar.gz", hash = "sha256:ff70335d468e7eb6ec65b95b99d3a2836546063f63acc5171de367e834932a81"}, ] +[[package]] +name = "sniffio" +version = "1.3.1" +description = "Sniff out which async library your code is running under" +optional = false +python-versions = ">=3.7" +files = [ + {file = "sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2"}, + {file = "sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc"}, +] + [[package]] name = "snowballstemmer" version = "3.0.1" @@ -2906,13 +2919,13 @@ numpy = ["NumPy"] [[package]] name = "soupsieve" -version = "2.8.1" +version = "2.8" description = "A modern CSS selector implementation for Beautiful Soup." optional = true python-versions = ">=3.9" files = [ - {file = "soupsieve-2.8.1-py3-none-any.whl", hash = "sha256:a11fe2a6f3d76ab3cf2de04eb339c1be5b506a8a47f2ceb6d139803177f85434"}, - {file = "soupsieve-2.8.1.tar.gz", hash = "sha256:4cf733bc50fa805f5df4b8ef4740fc0e0fa6218cf3006269afd3f9d6d80fd350"}, + {file = "soupsieve-2.8-py3-none-any.whl", hash = "sha256:0cc76456a30e20f5d7f2e14a98a4ae2ee4e5abdc7c5ea0aafe795f344bc7984c"}, + {file = "soupsieve-2.8.tar.gz", hash = "sha256:e2dd4a40a628cb5f28f6d4b0db8800b8f581b65bb380b97de22ba5ca8d72572f"}, ] [[package]] @@ -3067,23 +3080,23 @@ files = [ [[package]] name = "tornado" -version = "6.5.4" +version = "6.5.2" description = "Tornado is a Python web framework and asynchronous networking library, originally developed at FriendFeed." 
optional = false python-versions = ">=3.9" files = [ - {file = "tornado-6.5.4-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:d6241c1a16b1c9e4cc28148b1cda97dd1c6cb4fb7068ac1bedc610768dff0ba9"}, - {file = "tornado-6.5.4-cp39-abi3-macosx_10_9_x86_64.whl", hash = "sha256:2d50f63dda1d2cac3ae1fa23d254e16b5e38153758470e9956cbc3d813d40843"}, - {file = "tornado-6.5.4-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d1cf66105dc6acb5af613c054955b8137e34a03698aa53272dbda4afe252be17"}, - {file = "tornado-6.5.4-cp39-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:50ff0a58b0dc97939d29da29cd624da010e7f804746621c78d14b80238669335"}, - {file = "tornado-6.5.4-cp39-abi3-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e5fb5e04efa54cf0baabdd10061eb4148e0be137166146fff835745f59ab9f7f"}, - {file = "tornado-6.5.4-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:9c86b1643b33a4cd415f8d0fe53045f913bf07b4a3ef646b735a6a86047dda84"}, - {file = "tornado-6.5.4-cp39-abi3-musllinux_1_2_i686.whl", hash = "sha256:6eb82872335a53dd063a4f10917b3efd28270b56a33db69009606a0312660a6f"}, - {file = "tornado-6.5.4-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:6076d5dda368c9328ff41ab5d9dd3608e695e8225d1cd0fd1e006f05da3635a8"}, - {file = "tornado-6.5.4-cp39-abi3-win32.whl", hash = "sha256:1768110f2411d5cd281bac0a090f707223ce77fd110424361092859e089b38d1"}, - {file = "tornado-6.5.4-cp39-abi3-win_amd64.whl", hash = "sha256:fa07d31e0cd85c60713f2b995da613588aa03e1303d75705dca6af8babc18ddc"}, - {file = "tornado-6.5.4-cp39-abi3-win_arm64.whl", hash = "sha256:053e6e16701eb6cbe641f308f4c1a9541f91b6261991160391bfc342e8a551a1"}, - {file = "tornado-6.5.4.tar.gz", hash = "sha256:a22fa9047405d03260b483980635f0b041989d8bcc9a313f8fe18b411d84b1d7"}, + {file = "tornado-6.5.2-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:2436822940d37cde62771cff8774f4f00b3c8024fe482e16ca8387b8a2724db6"}, + {file = "tornado-6.5.2-cp39-abi3-macosx_10_9_x86_64.whl", hash = "sha256:583a52c7aa94ee046854ba81d9ebb6c81ec0fd30386d96f7640c96dad45a03ef"}, + {file = "tornado-6.5.2-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b0fe179f28d597deab2842b86ed4060deec7388f1fd9c1b4a41adf8af058907e"}, + {file = "tornado-6.5.2-cp39-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b186e85d1e3536d69583d2298423744740986018e393d0321df7340e71898882"}, + {file = "tornado-6.5.2-cp39-abi3-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e792706668c87709709c18b353da1f7662317b563ff69f00bab83595940c7108"}, + {file = "tornado-6.5.2-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:06ceb1300fd70cb20e43b1ad8aaee0266e69e7ced38fa910ad2e03285009ce7c"}, + {file = "tornado-6.5.2-cp39-abi3-musllinux_1_2_i686.whl", hash = "sha256:74db443e0f5251be86cbf37929f84d8c20c27a355dd452a5cfa2aada0d001ec4"}, + {file = "tornado-6.5.2-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:b5e735ab2889d7ed33b32a459cac490eda71a1ba6857b0118de476ab6c366c04"}, + {file = "tornado-6.5.2-cp39-abi3-win32.whl", hash = "sha256:c6f29e94d9b37a95013bb669616352ddb82e3bfe8326fccee50583caebc8a5f0"}, + {file = "tornado-6.5.2-cp39-abi3-win_amd64.whl", hash = "sha256:e56a5af51cc30dd2cae649429af65ca2f6571da29504a07995175df14c18f35f"}, + {file = "tornado-6.5.2-cp39-abi3-win_arm64.whl", hash = 
"sha256:d6c33dc3672e3a1f3618eb63b7ef4683a7688e7b9e6e8f0d9aa5726360a004af"}, + {file = "tornado-6.5.2.tar.gz", hash = "sha256:ab53c8f9a0fa351e2c0741284e06c7a45da86afb544133201c5cc8578eb076a0"}, ] [[package]] @@ -3123,6 +3136,20 @@ files = [ {file = "types_python_dateutil-2.9.0.20251115.tar.gz", hash = "sha256:8a47f2c3920f52a994056b8786309b43143faa5a64d4cbb2722d6addabdf1a58"}, ] +[[package]] +name = "types-requests" +version = "2.32.4.20250913" +description = "Typing stubs for requests" +optional = false +python-versions = ">=3.9" +files = [ + {file = "types_requests-2.32.4.20250913-py3-none-any.whl", hash = "sha256:78c9c1fffebbe0fa487a418e0fa5252017e9c60d1a2da394077f1780f655d7e1"}, + {file = "types_requests-2.32.4.20250913.tar.gz", hash = "sha256:abd6d4f9ce3a9383f269775a9835a4c24e5cd6b9f647d64f88aa4613c33def5d"}, +] + +[package.dependencies] +urllib3 = ">=2" + [[package]] name = "typing-extensions" version = "4.15.0" @@ -3150,13 +3177,13 @@ typing-extensions = ">=4.12.0" [[package]] name = "tzdata" -version = "2025.3" +version = "2025.2" description = "Provider of IANA time zone data" optional = true python-versions = ">=2" files = [ - {file = "tzdata-2025.3-py2.py3-none-any.whl", hash = "sha256:06a47e5700f3081aab02b2e513160914ff0694bce9947d6b76ebd6bf57cfc5d1"}, - {file = "tzdata-2025.3.tar.gz", hash = "sha256:de39c2ca5dc7b0344f2eba86f49d614019d29f060fc4ebc8a417896a620b56a7"}, + {file = "tzdata-2025.2-py2.py3-none-any.whl", hash = "sha256:1a403fada01ff9221ca8044d701868fa132215d84beb92242d9acd2147f667a8"}, + {file = "tzdata-2025.2.tar.gz", hash = "sha256:b60a638fcc0daffadf82fe0f57e53d06bdec2f36c4df66280ae79bce6bd6f2b9"}, ] [[package]] @@ -3175,20 +3202,20 @@ dev = ["flake8", "flake8-annotations", "flake8-bandit", "flake8-bugbear", "flake [[package]] name = "urllib3" -version = "2.6.2" +version = "2.5.0" description = "HTTP library with thread-safe connection pooling, file post, and more." 
-optional = true +optional = false python-versions = ">=3.9" files = [ - {file = "urllib3-2.6.2-py3-none-any.whl", hash = "sha256:ec21cddfe7724fc7cb4ba4bea7aa8e2ef36f607a4bab81aa6ce42a13dc3f03dd"}, - {file = "urllib3-2.6.2.tar.gz", hash = "sha256:016f9c98bb7e98085cb2b4b17b87d2c702975664e4f060c6532e64d1c1a5e797"}, + {file = "urllib3-2.5.0-py3-none-any.whl", hash = "sha256:e6b01673c0fa6a13e374b50871808eb3bf7046c4b125b216f6bf1cc604cff0dc"}, + {file = "urllib3-2.5.0.tar.gz", hash = "sha256:3fc47733c7e419d4bc3f6b3dc2b4f890bb743906a30d56ba4a5bfa4bbff92760"}, ] [package.extras] -brotli = ["brotli (>=1.2.0)", "brotlicffi (>=1.2.0.0)"] +brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)"] h2 = ["h2 (>=4,<5)"] socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] -zstd = ["backports-zstd (>=1.0.0)"] +zstd = ["zstandard (>=0.18.0)"] [[package]] name = "wcwidth" @@ -3461,4 +3488,4 @@ microphone = ["sounddevice"] [metadata] lock-version = "2.0" python-versions = ">=3.9,<4" -content-hash = "21dc70bd84ed76ce468578059bb2dc18f0573e9eeefdd25aca8e4baf84517977" +content-hash = "dc6a0b8b9b35b2ea915847c943e865f7eda885795ef884b449346c477a19d83f" diff --git a/pyproject.toml b/pyproject.toml index 3a9608dc..675b724b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,5 +1,6 @@ [project] name = "hume" +dynamic = ["version"] [tool.poetry] name = "hume" @@ -77,6 +78,8 @@ pytest-asyncio = "^0.23.5" pytest-xdist = "^3.6.1" python-dateutil = "^2.9.0" types-python-dateutil = "^2.9.0.20240316" +requests = "^2.31.0" +types-requests = "^2.31.0" covcheck = { version = "^0.4.3", extras = ["toml"]} pydocstyle = "^6.1.1" pydub-stubs = "^0.25.1" diff --git a/reference.md b/reference.md index 31499bf3..595ceb89 100644 --- a/reference.md +++ b/reference.md @@ -1,6 +1,6 @@ # Reference ## Tts -
client.tts.synthesize_json(...) +
client.tts.synthesize_json(...) -> AsyncHttpResponse[ReturnTts]
@@ -174,7 +174,7 @@ For a comparison of Octave versions, see the [Octave versions](/docs/text-to-spe
-
client.tts.synthesize_file(...) +
client.tts.synthesize_file(...) -> typing.AsyncIterator[AsyncHttpResponse[typing.AsyncIterator[bytes]]]
@@ -343,7 +343,7 @@ For a comparison of Octave versions, see the [Octave versions](/docs/text-to-spe
-
client.tts.synthesize_file_streaming(...) +
client.tts.synthesize_file_streaming(...) -> typing.AsyncIterator[AsyncHttpResponse[typing.AsyncIterator[bytes]]]
@@ -508,7 +508,7 @@ For a comparison of Octave versions, see the [Octave versions](/docs/text-to-spe
-
client.tts.synthesize_json_streaming(...) +
client.tts.synthesize_json_streaming(...) -> typing.AsyncIterator[AsyncHttpResponse[typing.AsyncIterator[TtsOutput]]]
@@ -677,7 +677,7 @@ For a comparison of Octave versions, see the [Octave versions](/docs/text-to-spe
-
client.tts.convert_voice_json(...) +
client.tts.convert_voice_json(...) -> typing.AsyncIterator[AsyncHttpResponse[typing.AsyncIterator[TtsOutput]]]
@@ -776,7 +776,7 @@ typing.Optional[core.File]` — See core.File for more documentation
## Tts Voices -
client.tts.voices.list(...) +
client.tts.voices.list(...) -> AsyncPager[ReturnVoice, ReturnPagedVoices]
@@ -896,7 +896,7 @@ For example, if `page_size` is set to 10, each page will include up to 10 items.
-
client.tts.voices.create(...) +
client.tts.voices.create(...) -> AsyncHttpResponse[ReturnVoice]
@@ -977,7 +977,7 @@ client.tts.voices.create(
-
client.tts.voices.delete(...) +
client.tts.voices.delete(...) -> AsyncHttpResponse[None]
@@ -1048,7 +1048,7 @@ client.tts.voices.delete(
## EmpathicVoice ControlPlane -
client.empathic_voice.control_plane.send(...) +
client.empathic_voice.control_plane.send(...) -> AsyncHttpResponse[None]
@@ -1129,7 +1129,7 @@ client.empathic_voice.control_plane.send(
## EmpathicVoice ChatGroups -
client.empathic_voice.chat_groups.list_chat_groups(...) +
client.empathic_voice.chat_groups.list_chat_groups(...) -> AsyncPager[ReturnChatGroup, ReturnPagedChatGroups]
@@ -1225,7 +1225,7 @@ For example, if `page_size` is set to 10, each page will include up to 10 items.
-
client.empathic_voice.chat_groups.get_chat_group(...) +
client.empathic_voice.chat_groups.get_chat_group(...) -> AsyncHttpResponse[ReturnChatGroupPagedChats]
@@ -1324,7 +1324,7 @@ This parameter uses zero-based indexing. For example, setting `page_number` to 0
-
client.empathic_voice.chat_groups.get_audio(...) +
client.empathic_voice.chat_groups.get_audio(...) -> AsyncHttpResponse[ReturnChatGroupPagedAudioReconstructions]
@@ -1415,7 +1415,7 @@ For example, if `page_size` is set to 10, each page will include up to 10 items.
-
client.empathic_voice.chat_groups.list_chat_group_events(...) +
client.empathic_voice.chat_groups.list_chat_group_events(...) -> AsyncPager[ReturnChatEvent, ReturnChatGroupPagedEvents]
@@ -1512,7 +1512,7 @@ This parameter uses zero-based indexing. For example, setting `page_number` to 0
## EmpathicVoice Chats -
client.empathic_voice.chats.list_chats(...) +
client.empathic_voice.chats.list_chats(...) -> AsyncPager[ReturnChat, ReturnPagedChats]
@@ -1615,7 +1615,7 @@ For example, if `page_size` is set to 10, each page will include up to 10 items.
-
client.empathic_voice.chats.list_chat_events(...) +
client.empathic_voice.chats.list_chat_events(...) -> AsyncPager[ReturnChatEvent, ReturnChatPagedEvents]
@@ -1711,7 +1711,7 @@ This parameter uses zero-based indexing. For example, setting `page_number` to 0
-
client.empathic_voice.chats.get_audio(...) +
client.empathic_voice.chats.get_audio(...) -> AsyncHttpResponse[ReturnChatAudioReconstruction]
@@ -1768,7 +1768,7 @@ client.empathic_voice.chats.get_audio(
## EmpathicVoice Configs -
client.empathic_voice.configs.list_configs(...) +
client.empathic_voice.configs.list_configs(...) -> AsyncPager[ReturnConfig, ReturnPagedConfigs]
@@ -1862,7 +1862,7 @@ For example, if `page_size` is set to 10, each page will include up to 10 items.
-
client.empathic_voice.configs.create_config(...) +
client.empathic_voice.configs.create_config(...) -> AsyncHttpResponse[ReturnConfig]
@@ -2049,7 +2049,7 @@ client.empathic_voice.configs.create_config(
-
client.empathic_voice.configs.list_config_versions(...) +
client.empathic_voice.configs.list_config_versions(...) -> AsyncPager[ReturnConfig, ReturnPagedConfigs]
@@ -2142,7 +2142,7 @@ For example, if `page_size` is set to 10, each page will include up to 10 items.
-
client.empathic_voice.configs.create_config_version(...) +
client.empathic_voice.configs.create_config_version(...) -> AsyncHttpResponse[ReturnConfig]
@@ -2334,7 +2334,7 @@ client.empathic_voice.configs.create_config_version(
-
client.empathic_voice.configs.delete_config(...) +
client.empathic_voice.configs.delete_config(...) -> AsyncHttpResponse[None]
@@ -2390,7 +2390,7 @@ client.empathic_voice.configs.delete_config(
-
client.empathic_voice.configs.update_config_name(...) +
client.empathic_voice.configs.update_config_name(...) -> AsyncHttpResponse[str]
@@ -2455,7 +2455,7 @@ client.empathic_voice.configs.update_config_name(
-
client.empathic_voice.configs.get_config_version(...) +
client.empathic_voice.configs.get_config_version(...) -> AsyncHttpResponse[ReturnConfig]
@@ -2520,7 +2520,7 @@ client.empathic_voice.configs.get_config_version(
-
client.empathic_voice.configs.delete_config_version(...) +
client.empathic_voice.configs.delete_config_version(...) -> AsyncHttpResponse[None]
@@ -2585,7 +2585,7 @@ client.empathic_voice.configs.delete_config_version(
-
client.empathic_voice.configs.update_config_description(...) +
client.empathic_voice.configs.update_config_description(...) -> AsyncHttpResponse[ReturnConfig]
@@ -2660,7 +2660,7 @@ client.empathic_voice.configs.update_config_description(
## EmpathicVoice Prompts -
client.empathic_voice.prompts.list_prompts(...) +
client.empathic_voice.prompts.list_prompts(...) -> AsyncPager[typing.Optional[ReturnPrompt], ReturnPagedPrompts]
@@ -2754,7 +2754,7 @@ For example, if `page_size` is set to 10, each page will include up to 10 items.
-
client.empathic_voice.prompts.create_prompt(...) +
client.empathic_voice.prompts.create_prompt(...) -> AsyncHttpResponse[typing.Optional[ReturnPrompt]]
@@ -2827,7 +2827,7 @@ client.empathic_voice.prompts.create_prompt(
-
client.empathic_voice.prompts.list_prompt_versions(...) +
client.empathic_voice.prompts.list_prompt_versions(...) -> AsyncHttpResponse[ReturnPagedPrompts]
@@ -2915,7 +2915,7 @@ For example, if `page_size` is set to 10, each page will include up to 10 items.
-
client.empathic_voice.prompts.create_prompt_version(...) +
client.empathic_voice.prompts.create_prompt_version(...) -> AsyncHttpResponse[typing.Optional[ReturnPrompt]]
@@ -2989,7 +2989,7 @@ client.empathic_voice.prompts.create_prompt_version(
-
client.empathic_voice.prompts.delete_prompt(...) +
client.empathic_voice.prompts.delete_prompt(...) -> AsyncHttpResponse[None]
@@ -3045,7 +3045,7 @@ client.empathic_voice.prompts.delete_prompt(
-
client.empathic_voice.prompts.update_prompt_name(...) +
client.empathic_voice.prompts.update_prompt_name(...) -> AsyncHttpResponse[str]
@@ -3110,7 +3110,7 @@ client.empathic_voice.prompts.update_prompt_name(
-
client.empathic_voice.prompts.get_prompt_version(...) +
client.empathic_voice.prompts.get_prompt_version(...) -> AsyncHttpResponse[typing.Optional[ReturnPrompt]]
@@ -3175,7 +3175,7 @@ client.empathic_voice.prompts.get_prompt_version(
-
client.empathic_voice.prompts.delete_prompt_version(...) +
client.empathic_voice.prompts.delete_prompt_version(...) -> AsyncHttpResponse[None]
@@ -3240,7 +3240,7 @@ client.empathic_voice.prompts.delete_prompt_version(
-
client.empathic_voice.prompts.update_prompt_description(...) +
client.empathic_voice.prompts.update_prompt_description(...) -> AsyncHttpResponse[typing.Optional[ReturnPrompt]]
@@ -3315,7 +3315,7 @@ client.empathic_voice.prompts.update_prompt_description(
## EmpathicVoice Tools -
client.empathic_voice.tools.list_tools(...) +
client.empathic_voice.tools.list_tools(...) -> AsyncPager[typing.Optional[ReturnUserDefinedTool], ReturnPagedUserDefinedTools]
@@ -3409,7 +3409,7 @@ For example, if `page_size` is set to 10, each page will include up to 10 items.
-
client.empathic_voice.tools.create_tool(...) +
client.empathic_voice.tools.create_tool(...) -> AsyncHttpResponse[typing.Optional[ReturnUserDefinedTool]]
@@ -3501,7 +3501,7 @@ client.empathic_voice.tools.create_tool(
-
client.empathic_voice.tools.list_tool_versions(...) +
client.empathic_voice.tools.list_tool_versions(...) -> AsyncPager[typing.Optional[ReturnUserDefinedTool], ReturnPagedUserDefinedTools]
@@ -3594,7 +3594,7 @@ For example, if `page_size` is set to 10, each page will include up to 10 items.
-
client.empathic_voice.tools.create_tool_version(...) +
client.empathic_voice.tools.create_tool_version(...) -> AsyncHttpResponse[typing.Optional[ReturnUserDefinedTool]]
@@ -3686,7 +3686,7 @@ client.empathic_voice.tools.create_tool_version(
-
client.empathic_voice.tools.delete_tool(...) +
client.empathic_voice.tools.delete_tool(...) -> AsyncHttpResponse[None]
@@ -3742,7 +3742,7 @@ client.empathic_voice.tools.delete_tool(
-
client.empathic_voice.tools.update_tool_name(...) +
client.empathic_voice.tools.update_tool_name(...) -> AsyncHttpResponse[str]
@@ -3807,7 +3807,7 @@ client.empathic_voice.tools.update_tool_name(
-
client.empathic_voice.tools.get_tool_version(...) +
client.empathic_voice.tools.get_tool_version(...) -> AsyncHttpResponse[typing.Optional[ReturnUserDefinedTool]]
@@ -3872,7 +3872,7 @@ client.empathic_voice.tools.get_tool_version(
-
client.empathic_voice.tools.delete_tool_version(...) +
client.empathic_voice.tools.delete_tool_version(...) -> AsyncHttpResponse[None]
@@ -3937,7 +3937,7 @@ client.empathic_voice.tools.delete_tool_version(
-
client.empathic_voice.tools.update_tool_description(...) +
client.empathic_voice.tools.update_tool_description(...) -> AsyncHttpResponse[typing.Optional[ReturnUserDefinedTool]]
@@ -4012,7 +4012,7 @@ client.empathic_voice.tools.update_tool_description(
## ExpressionMeasurement Batch -
client.expression_measurement.batch.list_jobs(...) +
client.expression_measurement.batch.list_jobs(...) -> AsyncHttpResponse[typing.List[UnionJob]]
@@ -4148,7 +4148,7 @@ Specify the order in which to sort the jobs. Defaults to descending order.
-
client.expression_measurement.batch.start_inference_job(...) +
client.expression_measurement.batch.start_inference_job(...) -> AsyncHttpResponse[str]
@@ -4267,7 +4267,7 @@ If you wish to supply more than 100 URLs, consider providing them as an archive
-
client.expression_measurement.batch.get_job_details(...) +
client.expression_measurement.batch.get_job_details(...) -> AsyncHttpResponse[UnionJob]
@@ -4337,7 +4337,7 @@ client.expression_measurement.batch.get_job_details(
-
client.expression_measurement.batch.get_job_predictions(...) +
client.expression_measurement.batch.get_job_predictions(...) -> AsyncHttpResponse[typing.List[UnionPredictResult]]
@@ -4407,7 +4407,7 @@ client.expression_measurement.batch.get_job_predictions(
-
client.expression_measurement.batch.start_inference_job_from_local_file(...) +
client.expression_measurement.batch.start_inference_job_from_local_file(...) -> AsyncHttpResponse[str]
diff --git a/src/hume/core/__init__.py b/src/hume/core/__init__.py index 3fbbac69..f2e41c82 100644 --- a/src/hume/core/__init__.py +++ b/src/hume/core/__init__.py @@ -8,6 +8,7 @@ if typing.TYPE_CHECKING: from .api_error import ApiError from .client_wrapper import AsyncClientWrapper, BaseClientWrapper, SyncClientWrapper + from .custom_pagination import AsyncCustomPager, SyncCustomPager from .datetime_utils import serialize_datetime from .events import EventEmitterMixin, EventType from .file import File, convert_file_dict_to_httpx_tuples, with_content_type @@ -31,6 +32,7 @@ _dynamic_imports: typing.Dict[str, str] = { "ApiError": ".api_error", "AsyncClientWrapper": ".client_wrapper", + "AsyncCustomPager": ".custom_pagination", "AsyncHttpClient": ".http_client", "AsyncHttpResponse": ".http_response", "AsyncPager": ".pagination", @@ -44,6 +46,7 @@ "IS_PYDANTIC_V2": ".pydantic_utilities", "RequestOptions": ".request_options", "SyncClientWrapper": ".client_wrapper", + "SyncCustomPager": ".custom_pagination", "SyncPager": ".pagination", "UniversalBaseModel": ".pydantic_utilities", "UniversalRootModel": ".pydantic_utilities", @@ -85,6 +88,7 @@ def __dir__(): __all__ = [ "ApiError", "AsyncClientWrapper", + "AsyncCustomPager", "AsyncHttpClient", "AsyncHttpResponse", "AsyncPager", @@ -98,6 +102,7 @@ def __dir__(): "IS_PYDANTIC_V2", "RequestOptions", "SyncClientWrapper", + "SyncCustomPager", "SyncPager", "UniversalBaseModel", "UniversalRootModel", diff --git a/src/hume/core/client_wrapper.py b/src/hume/core/client_wrapper.py index d7a684b1..75595bc0 100644 --- a/src/hume/core/client_wrapper.py +++ b/src/hume/core/client_wrapper.py @@ -67,9 +67,21 @@ def __init__( headers: typing.Optional[typing.Dict[str, str]] = None, environment: HumeClientEnvironment, timeout: typing.Optional[float] = None, + async_token: typing.Optional[typing.Callable[[], typing.Awaitable[str]]] = None, httpx_client: httpx.AsyncClient, ): super().__init__(api_key=api_key, headers=headers, environment=environment, timeout=timeout) + self._async_token = async_token self.httpx_client = AsyncHttpClient( - httpx_client=httpx_client, base_headers=self.get_headers, base_timeout=self.get_timeout + httpx_client=httpx_client, + base_headers=self.get_headers, + base_timeout=self.get_timeout, + async_base_headers=self.async_get_headers, ) + + async def async_get_headers(self) -> typing.Dict[str, str]: + headers = self.get_headers() + if self._async_token is not None: + token = await self._async_token() + headers["Authorization"] = f"Bearer {token}" + return headers diff --git a/src/hume/core/custom_pagination.py b/src/hume/core/custom_pagination.py new file mode 100644 index 00000000..5de2c7a8 --- /dev/null +++ b/src/hume/core/custom_pagination.py @@ -0,0 +1,152 @@ +# This file was auto-generated by Fern from our API Definition. + +""" +Custom Pagination Support + +This file is designed to be modified by SDK users to implement their own +pagination logic. The generator will import SyncCustomPager and AsyncCustomPager +from this module when custom pagination is used. + +Users should: +1. Implement their custom pager (e.g., PayrocPager, MyCustomPager, etc.) +2. 
Create adapter classes (SyncCustomPager/AsyncCustomPager) that bridge + between the generated SDK code and their custom pager implementation +""" + +from __future__ import annotations + +from typing import Any, AsyncIterator, Generic, Iterator, TypeVar + +# Import the base utilities you'll need +# Adjust these imports based on your actual structure +try: + from .client_wrapper import AsyncClientWrapper, SyncClientWrapper +except ImportError: + # Fallback for type hints + AsyncClientWrapper = Any # type: ignore + SyncClientWrapper = Any # type: ignore + +TItem = TypeVar("TItem") +TResponse = TypeVar("TResponse") + + +class SyncCustomPager(Generic[TItem, TResponse]): + """ + Adapter for custom synchronous pagination. + + The generator will call this with: + SyncCustomPager(initial_response=response, client_wrapper=client_wrapper) + + Implement this class to extract pagination metadata from your response + and delegate to your custom pager implementation. + + Example implementation: + + class SyncCustomPager(Generic[TItem, TResponse]): + def __init__( + self, + *, + initial_response: TResponse, + client_wrapper: SyncClientWrapper, + ): + # Extract data and pagination metadata from response + data = initial_response.data # Adjust based on your response structure + links = initial_response.links + + # Initialize your custom pager + self._pager = MyCustomPager( + current_page=Page(data), + httpx_client=client_wrapper.httpx_client, + get_headers=client_wrapper.get_headers, + # ... other parameters + ) + + def __iter__(self): + return iter(self._pager) + + # Delegate other methods to your pager... + """ + + def __init__( + self, + *, + initial_response: TResponse, + client_wrapper: SyncClientWrapper, + ): + """ + Initialize the custom pager. + + Args: + initial_response: The parsed API response from the first request + client_wrapper: The client wrapper providing HTTP client and utilities + """ + raise NotImplementedError( + "SyncCustomPager must be implemented. " + "Please implement this class in core/custom_pagination.py to define your pagination logic. " + "See the class docstring for examples." + ) + + def __iter__(self) -> Iterator[TItem]: + """Iterate through all items across all pages.""" + raise NotImplementedError("Must implement __iter__ method") + + +class AsyncCustomPager(Generic[TItem, TResponse]): + """ + Adapter for custom asynchronous pagination. + + The generator will call this with: + AsyncCustomPager(initial_response=response, client_wrapper=client_wrapper) + + Implement this class to extract pagination metadata from your response + and delegate to your custom async pager implementation. + + Example implementation: + + class AsyncCustomPager(Generic[TItem, TResponse]): + def __init__( + self, + *, + initial_response: TResponse, + client_wrapper: AsyncClientWrapper, + ): + # Extract data and pagination metadata from response + data = initial_response.data # Adjust based on your response structure + links = initial_response.links + + # Initialize your custom async pager + self._pager = MyAsyncCustomPager( + current_page=Page(data), + httpx_client=client_wrapper.httpx_client, + get_headers=client_wrapper.get_headers, + # ... other parameters + ) + + async def __aiter__(self): + return self._pager.__aiter__() + + # Delegate other methods to your pager... + """ + + def __init__( + self, + *, + initial_response: TResponse, + client_wrapper: AsyncClientWrapper, + ): + """ + Initialize the custom async pager. 
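+
+        The generated stub below raises NotImplementedError until this class
+        is filled in; see the class docstring above for a sketch of what an
+        implementation typically looks like.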
+ + Args: + initial_response: The parsed API response from the first request + client_wrapper: The client wrapper providing HTTP client and utilities + """ + raise NotImplementedError( + "AsyncCustomPager must be implemented. " + "Please implement this class in core/custom_pagination.py to define your pagination logic. " + "See the class docstring for examples." + ) + + async def __aiter__(self) -> AsyncIterator[TItem]: + """Asynchronously iterate through all items across all pages.""" + raise NotImplementedError("Must implement __aiter__ method") diff --git a/src/hume/core/http_client.py b/src/hume/core/http_client.py index e4173f99..7c6c936f 100644 --- a/src/hume/core/http_client.py +++ b/src/hume/core/http_client.py @@ -5,7 +5,6 @@ import re import time import typing -import urllib.parse from contextlib import asynccontextmanager, contextmanager from random import random @@ -14,13 +13,13 @@ from .force_multipart import FORCE_MULTIPART from .jsonable_encoder import jsonable_encoder from .query_encoder import encode_query -from .remove_none_from_dict import remove_none_from_dict +from .remove_none_from_dict import remove_none_from_dict as remove_none_from_dict from .request_options import RequestOptions from httpx._types import RequestFiles -INITIAL_RETRY_DELAY_SECONDS = 0.5 -MAX_RETRY_DELAY_SECONDS = 10 -MAX_RETRY_DELAY_SECONDS_FROM_HEADER = 30 +INITIAL_RETRY_DELAY_SECONDS = 1.0 +MAX_RETRY_DELAY_SECONDS = 60.0 +JITTER_FACTOR = 0.2 # 20% random jitter def _parse_retry_after(response_headers: httpx.Headers) -> typing.Optional[float]: @@ -64,6 +63,38 @@ def _parse_retry_after(response_headers: httpx.Headers) -> typing.Optional[float return seconds +def _add_positive_jitter(delay: float) -> float: + """Add positive jitter (0-20%) to prevent thundering herd.""" + jitter_multiplier = 1 + random() * JITTER_FACTOR + return delay * jitter_multiplier + + +def _add_symmetric_jitter(delay: float) -> float: + """Add symmetric jitter (±10%) for exponential backoff.""" + jitter_multiplier = 1 + (random() - 0.5) * JITTER_FACTOR + return delay * jitter_multiplier + + +def _parse_x_ratelimit_reset(response_headers: httpx.Headers) -> typing.Optional[float]: + """ + Parse the X-RateLimit-Reset header (Unix timestamp in seconds). + Returns seconds to wait, or None if header is missing/invalid. + """ + reset_time_str = response_headers.get("x-ratelimit-reset") + if reset_time_str is None: + return None + + try: + reset_time = int(reset_time_str) + delay = reset_time - time.time() + if delay > 0: + return delay + except (ValueError, TypeError): + pass + + return None + + def _retry_timeout(response: httpx.Response, retries: int) -> float: """ Determine the amount of time to wait before retrying a request. @@ -71,17 +102,19 @@ def _retry_timeout(response: httpx.Response, retries: int) -> float: with a jitter to determine the number of seconds to wait. """ - # If the API asks us to wait a certain amount of time (and it's a reasonable amount), just do what it says. + # 1. Check Retry-After header first retry_after = _parse_retry_after(response.headers) - if retry_after is not None and retry_after <= MAX_RETRY_DELAY_SECONDS_FROM_HEADER: - return retry_after + if retry_after is not None and retry_after > 0: + return min(retry_after, MAX_RETRY_DELAY_SECONDS) - # Apply exponential backoff, capped at MAX_RETRY_DELAY_SECONDS. - retry_delay = min(INITIAL_RETRY_DELAY_SECONDS * pow(2.0, retries), MAX_RETRY_DELAY_SECONDS) + # 2. 
Check X-RateLimit-Reset header (with positive jitter) + ratelimit_reset = _parse_x_ratelimit_reset(response.headers) + if ratelimit_reset is not None: + return _add_positive_jitter(min(ratelimit_reset, MAX_RETRY_DELAY_SECONDS)) - # Add a randomness / jitter to the retry delay to avoid overwhelming the server with retries. - timeout = retry_delay * (1 - 0.25 * random()) - return timeout if timeout >= 0 else 0 + # 3. Fall back to exponential backoff (with symmetric jitter) + backoff = min(INITIAL_RETRY_DELAY_SECONDS * pow(2.0, retries), MAX_RETRY_DELAY_SECONDS) + return _add_symmetric_jitter(backoff) def _should_retry(response: httpx.Response) -> bool: @@ -89,6 +122,45 @@ def _should_retry(response: httpx.Response) -> bool: return response.status_code >= 500 or response.status_code in retryable_400s +def _build_url(base_url: str, path: typing.Optional[str]) -> str: + """ + Build a full URL by joining a base URL with a path. + + This function correctly handles base URLs that contain path prefixes (e.g., tenant-based URLs) + by using string concatenation instead of urllib.parse.urljoin(), which would incorrectly + strip path components when the path starts with '/'. + + Example: + >>> _build_url("https://cloud.example.com/org/tenant/api", "/users") + 'https://cloud.example.com/org/tenant/api/users' + + Args: + base_url: The base URL, which may contain path prefixes. + path: The path to append. Can be None or empty string. + + Returns: + The full URL with base_url and path properly joined. + """ + if not path: + return base_url + return f"{base_url.rstrip('/')}/{path.lstrip('/')}" + + +def _maybe_filter_none_from_multipart_data( + data: typing.Optional[typing.Any], + request_files: typing.Optional[RequestFiles], + force_multipart: typing.Optional[bool], +) -> typing.Optional[typing.Any]: + """ + Filter None values from data body for multipart/form requests. + This prevents httpx from converting None to empty strings in multipart encoding. + Only applies when files are present or force_multipart is True. + """ + if data is not None and isinstance(data, typing.Mapping) and (request_files or force_multipart): + return remove_none_from_dict(data) + return data + + def remove_omit_from_dict( original: typing.Dict[str, typing.Optional[typing.Any]], omit: typing.Optional[typing.Any], @@ -143,8 +215,19 @@ def get_request_body( # If both data and json are None, we send json data in the event extra properties are specified json_body = maybe_filter_request_body(json, request_options, omit) - # If you have an empty JSON body, you should just send None - return (json_body if json_body != {} else None), data_body if data_body != {} else None + has_additional_body_parameters = bool( + request_options is not None and request_options.get("additional_body_parameters") + ) + + # Only collapse empty dict to None when the body was not explicitly provided + # and there are no additional body parameters. This preserves explicit empty + # bodies (e.g., when an endpoint has a request body type but all fields are optional). 
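Taken together, the retry helpers above form a three-tier ladder: honor `Retry-After` when present (capped at `MAX_RETRY_DELAY_SECONDS`), else honor `X-RateLimit-Reset` with positive jitter, else fall back to capped exponential backoff with symmetric jitter. A condensed, runnable restatement of the fallback tier:

```python
import random

INITIAL_RETRY_DELAY_SECONDS = 1.0
MAX_RETRY_DELAY_SECONDS = 60.0
JITTER_FACTOR = 0.2  # 20% total spread


def backoff_with_symmetric_jitter(retries: int) -> float:
    """Tier 3 above: exponential backoff capped at 60s, then +/-10% jitter."""
    base = min(INITIAL_RETRY_DELAY_SECONDS * 2.0 ** retries, MAX_RETRY_DELAY_SECONDS)
    return base * (1 + (random.random() - 0.5) * JITTER_FACTOR)


if __name__ == "__main__":
    for attempt in range(7):
        print(attempt, round(backoff_with_symmetric_jitter(attempt), 2))
    # Roughly 1, 2, 4, 8, 16, 32, 60 seconds, each within +/-10% of the base.
```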
+ if json_body == {} and json is None and not has_additional_body_parameters: + json_body = None + if data_body == {} and data is None and not has_additional_body_parameters: + data_body = None + + return json_body, data_body class HttpClient: @@ -188,7 +271,7 @@ def request( ] = None, headers: typing.Optional[typing.Dict[str, typing.Any]] = None, request_options: typing.Optional[RequestOptions] = None, - retries: int = 2, + retries: int = 0, omit: typing.Optional[typing.Any] = None, force_multipart: typing.Optional[bool] = None, ) -> httpx.Response: @@ -210,9 +293,31 @@ def request( if (request_files is None or len(request_files) == 0) and force_multipart: request_files = FORCE_MULTIPART + data_body = _maybe_filter_none_from_multipart_data(data_body, request_files, force_multipart) + + # Compute encoded params separately to avoid passing empty list to httpx + # (httpx strips existing query params from URL when params=[] is passed) + _encoded_params = encode_query( + jsonable_encoder( + remove_none_from_dict( + remove_omit_from_dict( + { + **(params if params is not None else {}), + **( + request_options.get("additional_query_parameters", {}) or {} + if request_options is not None + else {} + ), + }, + omit, + ) + ) + ) + ) + response = self.httpx_client.request( method=method, - url=urllib.parse.urljoin(f"{base_url}/", path), + url=_build_url(base_url, path), headers=jsonable_encoder( remove_none_from_dict( { @@ -222,23 +327,7 @@ def request( } ) ), - params=encode_query( - jsonable_encoder( - remove_none_from_dict( - remove_omit_from_dict( - { - **(params if params is not None else {}), - **( - request_options.get("additional_query_parameters", {}) or {} - if request_options is not None - else {} - ), - }, - omit, - ) - ) - ) - ), + params=_encoded_params if _encoded_params else None, json=json_body, data=data_body, content=content, @@ -246,9 +335,9 @@ def request( timeout=timeout, ) - max_retries: int = request_options.get("max_retries", 0) if request_options is not None else 0 + max_retries: int = request_options.get("max_retries", 2) if request_options is not None else 2 if _should_retry(response=response): - if max_retries > retries: + if retries < max_retries: time.sleep(_retry_timeout(response=response, retries=retries)) return self.request( path=path, @@ -285,7 +374,7 @@ def stream( ] = None, headers: typing.Optional[typing.Dict[str, typing.Any]] = None, request_options: typing.Optional[RequestOptions] = None, - retries: int = 2, + retries: int = 0, omit: typing.Optional[typing.Any] = None, force_multipart: typing.Optional[bool] = None, ) -> typing.Iterator[httpx.Response]: @@ -307,9 +396,31 @@ def stream( json_body, data_body = get_request_body(json=json, data=data, request_options=request_options, omit=omit) + data_body = _maybe_filter_none_from_multipart_data(data_body, request_files, force_multipart) + + # Compute encoded params separately to avoid passing empty list to httpx + # (httpx strips existing query params from URL when params=[] is passed) + _encoded_params = encode_query( + jsonable_encoder( + remove_none_from_dict( + remove_omit_from_dict( + { + **(params if params is not None else {}), + **( + request_options.get("additional_query_parameters", {}) + if request_options is not None + else {} + ), + }, + omit, + ) + ) + ) + ) + with self.httpx_client.stream( method=method, - url=urllib.parse.urljoin(f"{base_url}/", path), + url=_build_url(base_url, path), headers=jsonable_encoder( remove_none_from_dict( { @@ -319,23 +430,7 @@ def stream( } ) ), - params=encode_query( 
- jsonable_encoder( - remove_none_from_dict( - remove_omit_from_dict( - { - **(params if params is not None else {}), - **( - request_options.get("additional_query_parameters", {}) - if request_options is not None - else {} - ), - }, - omit, - ) - ) - ) - ), + params=_encoded_params if _encoded_params else None, json=json_body, data=data_body, content=content, @@ -353,12 +448,19 @@ def __init__( base_timeout: typing.Callable[[], typing.Optional[float]], base_headers: typing.Callable[[], typing.Dict[str, str]], base_url: typing.Optional[typing.Callable[[], str]] = None, + async_base_headers: typing.Optional[typing.Callable[[], typing.Awaitable[typing.Dict[str, str]]]] = None, ): self.base_url = base_url self.base_timeout = base_timeout self.base_headers = base_headers + self.async_base_headers = async_base_headers self.httpx_client = httpx_client + async def _get_headers(self) -> typing.Dict[str, str]: + if self.async_base_headers is not None: + return await self.async_base_headers() + return self.base_headers() + def get_base_url(self, maybe_base_url: typing.Optional[str]) -> str: base_url = maybe_base_url if self.base_url is not None and base_url is None: @@ -386,7 +488,7 @@ async def request( ] = None, headers: typing.Optional[typing.Dict[str, typing.Any]] = None, request_options: typing.Optional[RequestOptions] = None, - retries: int = 2, + retries: int = 0, omit: typing.Optional[typing.Any] = None, force_multipart: typing.Optional[bool] = None, ) -> httpx.Response: @@ -408,36 +510,45 @@ async def request( json_body, data_body = get_request_body(json=json, data=data, request_options=request_options, omit=omit) + data_body = _maybe_filter_none_from_multipart_data(data_body, request_files, force_multipart) + + # Get headers (supports async token providers) + _headers = await self._get_headers() + + # Compute encoded params separately to avoid passing empty list to httpx + # (httpx strips existing query params from URL when params=[] is passed) + _encoded_params = encode_query( + jsonable_encoder( + remove_none_from_dict( + remove_omit_from_dict( + { + **(params if params is not None else {}), + **( + request_options.get("additional_query_parameters", {}) or {} + if request_options is not None + else {} + ), + }, + omit, + ) + ) + ) + ) + # Add the input to each of these and do None-safety checks response = await self.httpx_client.request( method=method, - url=urllib.parse.urljoin(f"{base_url}/", path), + url=_build_url(base_url, path), headers=jsonable_encoder( remove_none_from_dict( { - **self.base_headers(), + **_headers, **(headers if headers is not None else {}), **(request_options.get("additional_headers", {}) or {} if request_options is not None else {}), } ) ), - params=encode_query( - jsonable_encoder( - remove_none_from_dict( - remove_omit_from_dict( - { - **(params if params is not None else {}), - **( - request_options.get("additional_query_parameters", {}) or {} - if request_options is not None - else {} - ), - }, - omit, - ) - ) - ) - ), + params=_encoded_params if _encoded_params else None, json=json_body, data=data_body, content=content, @@ -445,9 +556,9 @@ async def request( timeout=timeout, ) - max_retries: int = request_options.get("max_retries", 0) if request_options is not None else 0 + max_retries: int = request_options.get("max_retries", 2) if request_options is not None else 2 if _should_retry(response=response): - if max_retries > retries: + if retries < max_retries: await asyncio.sleep(_retry_timeout(response=response, retries=retries)) return await self.request( 
path=path, @@ -483,7 +594,7 @@ async def stream( ] = None, headers: typing.Optional[typing.Dict[str, typing.Any]] = None, request_options: typing.Optional[RequestOptions] = None, - retries: int = 2, + retries: int = 0, omit: typing.Optional[typing.Any] = None, force_multipart: typing.Optional[bool] = None, ) -> typing.AsyncIterator[httpx.Response]: @@ -505,35 +616,44 @@ async def stream( json_body, data_body = get_request_body(json=json, data=data, request_options=request_options, omit=omit) + data_body = _maybe_filter_none_from_multipart_data(data_body, request_files, force_multipart) + + # Get headers (supports async token providers) + _headers = await self._get_headers() + + # Compute encoded params separately to avoid passing empty list to httpx + # (httpx strips existing query params from URL when params=[] is passed) + _encoded_params = encode_query( + jsonable_encoder( + remove_none_from_dict( + remove_omit_from_dict( + { + **(params if params is not None else {}), + **( + request_options.get("additional_query_parameters", {}) + if request_options is not None + else {} + ), + }, + omit=omit, + ) + ) + ) + ) + async with self.httpx_client.stream( method=method, - url=urllib.parse.urljoin(f"{base_url}/", path), + url=_build_url(base_url, path), headers=jsonable_encoder( remove_none_from_dict( { - **self.base_headers(), + **_headers, **(headers if headers is not None else {}), **(request_options.get("additional_headers", {}) if request_options is not None else {}), } ) ), - params=encode_query( - jsonable_encoder( - remove_none_from_dict( - remove_omit_from_dict( - { - **(params if params is not None else {}), - **( - request_options.get("additional_query_parameters", {}) - if request_options is not None - else {} - ), - }, - omit=omit, - ) - ) - ) - ), + params=_encoded_params if _encoded_params else None, json=json_body, data=data_body, content=content, diff --git a/src/hume/empathic_voice/__init__.py b/src/hume/empathic_voice/__init__.py index 73ef85fe..89874760 100644 --- a/src/hume/empathic_voice/__init__.py +++ b/src/hume/empathic_voice/__init__.py @@ -99,7 +99,6 @@ ReturnWebhookSpec, Role, SessionSettings, - SessionSettingsMessage, SessionSettingsVariablesValue, SubscribeEvent, Tool, @@ -223,7 +222,6 @@ "ReturnWebhookSpec": ".types", "Role": ".types", "SessionSettings": ".types", - "SessionSettingsMessage": ".types", "SessionSettingsVariablesValue": ".types", "SubscribeEvent": ".types", "Tool": ".types", @@ -375,7 +373,6 @@ def __dir__(): "ReturnWebhookSpec", "Role", "SessionSettings", - "SessionSettingsMessage", "SessionSettingsVariablesValue", "SubscribeEvent", "Tool", diff --git a/src/hume/empathic_voice/chat/audio/microphone_interface.py b/src/hume/empathic_voice/chat/audio/microphone_interface.py index 9dc6944d..584eaea9 100644 --- a/src/hume/empathic_voice/chat/audio/microphone_interface.py +++ b/src/hume/empathic_voice/chat/audio/microphone_interface.py @@ -11,7 +11,7 @@ from hume.empathic_voice.chat.audio.chat_client import ChatClient from hume.empathic_voice.types import AudioConfiguration from hume.empathic_voice.chat.audio.asyncio_utilities import Stream -from hume.empathic_voice.types.session_settings_message import SessionSettingsMessage +from hume.empathic_voice.types.session_settings import SessionSettings logger = logging.getLogger(__name__) @@ -44,7 +44,7 @@ async def start( audio_config = AudioConfiguration(sample_rate=microphone.sample_rate, channels=microphone.num_channels, encoding="linear16") - session_settings_config = 
SessionSettingsMessage(audio=audio_config) + session_settings_config = SessionSettings(audio=audio_config) await socket.send_publish( message=session_settings_config ) diff --git a/src/hume/empathic_voice/chat/client.py.diff b/src/hume/empathic_voice/chat/client.py.diff index 558ed985..aa685fdd 100644 --- a/src/hume/empathic_voice/chat/client.py.diff +++ b/src/hume/empathic_voice/chat/client.py.diff @@ -1,134 +1,32 @@ diff --git a/src/hume/empathic_voice/chat/client.py b/src/hume/empathic_voice/chat/client.py -index 2a3732f5..8ec7e4cd 100644 +index 43a9cf28..8ec7e4cd 100644 --- a/src/hume/empathic_voice/chat/client.py +++ b/src/hume/empathic_voice/chat/client.py -@@ -1,11 +1,7 @@ - # This file was auto-generated by Fern from our API Definition. - --from contextlib import asynccontextmanager, contextmanager -- --import json - import typing -- --from typing_extensions import deprecated -+from contextlib import asynccontextmanager, contextmanager - - import httpx - import websockets.exceptions -@@ -14,34 +10,16 @@ from ...core.api_error import ApiError +@@ -10,7 +10,6 @@ from ...core.api_error import ApiError from ...core.client_wrapper import AsyncClientWrapper, SyncClientWrapper from ...core.request_options import RequestOptions from ...core.serialization import convert_and_respect_annotation_metadata -from ...core.query_encoder import single_query_encoder from ..types.connect_session_settings import ConnectSessionSettings from .raw_client import AsyncRawChatClient, RawChatClient --from .socket_client import AsyncChatSocketClient, ChatSocketClient, ChatConnectOptions -- --from ...core.events import EventEmitterMixin, EventType --from ...core.pydantic_utilities import parse_obj_as --from ..types.assistant_input import AssistantInput --from ..types.audio_input import AudioInput --from ..types.pause_assistant_message import PauseAssistantMessage --from ..types.resume_assistant_message import ResumeAssistantMessage --from ..types.session_settings import SessionSettings --from ..types.tool_error_message import ToolErrorMessage --from ..types.tool_response_message import ToolResponseMessage --from ..types.user_input import UserInput --from .types.publish_event import PublishEvent --from ..types.subscribe_event import SubscribeEvent -- --from ...core.api_error import ApiError --import asyncio -- --from ...core.websocket import OnErrorHandlerType, OnMessageHandlerType, OnOpenCloseHandlerType -+from .socket_client import AsyncChatSocketClient, ChatSocketClient - - try: - from websockets.legacy.client import connect as websockets_client_connect # type: ignore - except ImportError: - from websockets import connect as websockets_client_connect # type: ignore - -+ - class ChatClient: - def __init__(self, *, client_wrapper: SyncClientWrapper): - self._raw_client = RawChatClient(client_wrapper=client_wrapper) -@@ -62,6 +40,7 @@ class ChatClient: - self, - *, - access_token: typing.Optional[str] = None, -+ allow_connection: typing.Optional[bool] = None, - config_id: typing.Optional[str] = None, - config_version: typing.Optional[int] = None, - event_limit: typing.Optional[int] = None, -@@ -69,7 +48,6 @@ class ChatClient: + from .socket_client import AsyncChatSocketClient, ChatSocketClient +@@ -48,7 +47,7 @@ class ChatClient: + resumed_chat_group_id: typing.Optional[str] = None, verbose_transcription: typing.Optional[bool] = None, api_key: typing.Optional[str] = None, - session_settings: ConnectSessionSettings, -- allow_connection: typing.Optional[bool] = None, +- session_settings: 
typing.Optional[ConnectSessionSettings] = None, ++ session_settings: ConnectSessionSettings, request_options: typing.Optional[RequestOptions] = None, ) -> typing.Iterator[ChatSocketClient]: """ -@@ -84,12 +62,15 @@ class ChatClient: - - For more details, refer to the [Authentication Strategies Guide](/docs/introduction/api-key#authentication-strategies). - -+ allow_connection : typing.Optional[bool] -+ Allows external connections to this chat via the /connect endpoint. -+ - config_id : typing.Optional[str] - The unique identifier for an EVI configuration. - -- Include this ID in your connection request to equip EVI with the Prompt, Language Model, Voice, and Tools associated with the specified configuration. If omitted, EVI will apply [default configuration settings](/docs/speech-to-speech-evi/configuration/build-a-configuration#default-configuration). -+ Include this ID in your connection request to equip EVI with the Prompt, Language Model, Voice, and Tools associated with the specified configuration. If omitted, EVI will apply [default configuration settings](/docs/empathic-voice-interface-evi/configuration#default-configuration). - -- For help obtaining this ID, see our [Configuration Guide](/docs/speech-to-speech-evi/configuration). -+ For help obtaining this ID, see our [Configuration Guide](/docs/empathic-voice-interface-evi/configuration). - - config_version : typing.Optional[int] - The version number of the EVI configuration specified by the `config_id`. -@@ -110,19 +91,16 @@ class ChatClient: - - There are three ways to obtain the Chat Group ID: - -- - [Chat Metadata](/reference/speech-to-speech-evi/chat#receive.ChatMetadata): Upon establishing a WebSocket connection with EVI, the user receives a Chat Metadata message. This message contains a `chat_group_id`, which can be used to resume conversations within this chat group in future sessions. -+ - [Chat Metadata](/reference/empathic-voice-interface-evi/chat/chat#receive.Chat%20Metadata.type): Upon establishing a WebSocket connection with EVI, the user receives a Chat Metadata message. This message contains a `chat_group_id`, which can be used to resume conversations within this chat group in future sessions. - -- - [List Chats endpoint](/reference/speech-to-speech-evi/chats/list-chats): Use the GET `/v0/evi/chats` endpoint to obtain the Chat Group ID of individual Chat sessions. This endpoint lists all available Chat sessions and their associated Chat Group ID. -+ - [List Chats endpoint](/reference/empathic-voice-interface-evi/chats/list-chats): Use the GET `/v0/evi/chats` endpoint to obtain the Chat Group ID of individual Chat sessions. This endpoint lists all available Chat sessions and their associated Chat Group ID. - -- - [List Chat Groups endpoint](/reference/speech-to-speech-evi/chat-groups/list-chat-groups): Use the GET `/v0/evi/chat_groups` endpoint to obtain the Chat Group IDs of all Chat Groups associated with an API key. This endpoint returns a list of all available chat groups. -+ - [List Chat Groups endpoint](/reference/empathic-voice-interface-evi/chat-groups/list-chat-groups): Use the GET `/v0/evi/chat_groups` endpoint to obtain the Chat Group IDs of all Chat Groups associated with an API key. This endpoint returns a list of all available chat groups. - - verbose_transcription : typing.Optional[bool] -- A flag to enable verbose transcription. Set this query parameter to `true` to have unfinalized user transcripts be sent to the client as interim UserMessage messages. 
The [interim](/reference/speech-to-speech-evi/chat#receive.UserMessage.interim) field on a [UserMessage](/reference/speech-to-speech-evi/chat#receive.UserMessage) denotes whether the message is "interim" or "final." -+ A flag to enable verbose transcription. Set this query parameter to `"true"` to have unfinalized user transcripts be sent to the client as interim `UserMessage` messages. - - api_key : typing.Optional[str] -- API key used for authenticating the client. If not provided, an `access_token` must be provided to authenticate. -- -- For more details, refer to the [Authentication Strategies Guide](/docs/introduction/api-key#authentication-strategies). - - session_settings : ConnectSessionSettings - -@@ -137,6 +115,8 @@ class ChatClient: - query_params = httpx.QueryParams() - if access_token is not None: - query_params = query_params.add("access_token", access_token) -+ if allow_connection is not None: -+ query_params = query_params.add("allow_connection", allow_connection) - if config_id is not None: - query_params = query_params.add("config_id", config_id) - if config_version is not None: -@@ -149,12 +129,18 @@ class ChatClient: +@@ -130,10 +129,18 @@ class ChatClient: query_params = query_params.add("verbose_transcription", verbose_transcription) if api_key is not None: query_params = query_params.add("api_key", api_key) -- if allow_connection is not None: -- query_params = query_params.add("allow_connection", str(allow_connection).lower()) - if session_settings is not None: - flattened_params = single_query_encoder("session_settings", session_settings) - for param_key, param_value in flattened_params: -- query_params = query_params.add(param_key, str(param_value)) +- query_params = query_params.add(param_key, param_value) + if ( + convert_and_respect_annotation_metadata( + object_=session_settings, annotation=ConnectSessionSettings, direction="write" @@ -144,92 +42,34 @@ index 2a3732f5..8ec7e4cd 100644 ws_url = ws_url + f"?{query_params}" headers = self._raw_client._client_wrapper.get_headers() if request_options and "additional_headers" in request_options: -@@ -197,14 +183,14 @@ class AsyncChatClient: - self, - *, - access_token: typing.Optional[str] = None, -+ allow_connection: typing.Optional[bool] = None, - config_id: typing.Optional[str] = None, - config_version: typing.Optional[int] = None, - event_limit: typing.Optional[int] = None, +@@ -183,7 +190,7 @@ class AsyncChatClient: resumed_chat_group_id: typing.Optional[str] = None, verbose_transcription: typing.Optional[bool] = None, api_key: typing.Optional[str] = None, - session_settings: typing.Optional[ConnectSessionSettings] = None, -- allow_connection: typing.Optional[bool] = None, + session_settings: ConnectSessionSettings, request_options: typing.Optional[RequestOptions] = None, ) -> typing.AsyncIterator[AsyncChatSocketClient]: """ -@@ -219,12 +205,15 @@ class AsyncChatClient: - - For more details, refer to the [Authentication Strategies Guide](/docs/introduction/api-key#authentication-strategies). - -+ allow_connection : typing.Optional[bool] -+ Allows external connections to this chat via the /connect endpoint. -+ - config_id : typing.Optional[str] - The unique identifier for an EVI configuration. - -- Include this ID in your connection request to equip EVI with the Prompt, Language Model, Voice, and Tools associated with the specified configuration. If omitted, EVI will apply [default configuration settings](/docs/speech-to-speech-evi/configuration/build-a-configuration#default-configuration). 
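For context on the `verbose_transcription` flag documented above: when enabled, unfinalized transcripts are delivered as interim `UserMessage` events before the final version arrives. A hypothetical consumer that acts only on finalized transcripts might look like this (the `type`, `interim`, and `message` attribute names are assumptions about the event shape):

```python
import typing


async def consume(socket: typing.AsyncIterable[typing.Any]) -> None:
    # Hypothetical: iterate SubscribeEvent-like objects from a chat socket.
    async for event in socket:
        if getattr(event, "type", None) != "user_message":
            continue
        if getattr(event, "interim", False):
            continue  # unfinalized transcript; the final version follows
        print("user said:", getattr(event, "message", event))
```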
-+ Include this ID in your connection request to equip EVI with the Prompt, Language Model, Voice, and Tools associated with the specified configuration. If omitted, EVI will apply [default configuration settings](/docs/empathic-voice-interface-evi/configuration#default-configuration). - -- For help obtaining this ID, see our [Configuration Guide](/docs/speech-to-speech-evi/configuration). -+ For help obtaining this ID, see our [Configuration Guide](/docs/empathic-voice-interface-evi/configuration). - - config_version : typing.Optional[int] - The version number of the EVI configuration specified by the `config_id`. -@@ -245,27 +234,19 @@ class AsyncChatClient: - - There are three ways to obtain the Chat Group ID: - -- - [Chat Metadata](/reference/speech-to-speech-evi/chat#receive.ChatMetadata): Upon establishing a WebSocket connection with EVI, the user receives a Chat Metadata message. This message contains a `chat_group_id`, which can be used to resume conversations within this chat group in future sessions. -+ - [Chat Metadata](/reference/empathic-voice-interface-evi/chat/chat#receive.Chat%20Metadata.type): Upon establishing a WebSocket connection with EVI, the user receives a Chat Metadata message. This message contains a `chat_group_id`, which can be used to resume conversations within this chat group in future sessions. - -- - [List Chats endpoint](/reference/speech-to-speech-evi/chats/list-chats): Use the GET `/v0/evi/chats` endpoint to obtain the Chat Group ID of individual Chat sessions. This endpoint lists all available Chat sessions and their associated Chat Group ID. -+ - [List Chats endpoint](/reference/empathic-voice-interface-evi/chats/list-chats): Use the GET `/v0/evi/chats` endpoint to obtain the Chat Group ID of individual Chat sessions. This endpoint lists all available Chat sessions and their associated Chat Group ID. - -- - [List Chat Groups endpoint](/reference/speech-to-speech-evi/chat-groups/list-chat-groups): Use the GET `/v0/evi/chat_groups` endpoint to obtain the Chat Group IDs of all Chat Groups associated with an API key. This endpoint returns a list of all available chat groups. -+ - [List Chat Groups endpoint](/reference/empathic-voice-interface-evi/chat-groups/list-chat-groups): Use the GET `/v0/evi/chat_groups` endpoint to obtain the Chat Group IDs of all Chat Groups associated with an API key. This endpoint returns a list of all available chat groups. - - verbose_transcription : typing.Optional[bool] -- A flag to enable verbose transcription. Set this query parameter to `true` to have unfinalized user transcripts be sent to the client as interim UserMessage messages. The [interim](/reference/speech-to-speech-evi/chat#receive.UserMessage.interim) field on a [UserMessage](/reference/speech-to-speech-evi/chat#receive.UserMessage) denotes whether the message is "interim" or "final." -+ A flag to enable verbose transcription. Set this query parameter to `"true"` to have unfinalized user transcripts be sent to the client as interim `UserMessage` messages. +@@ -238,7 +245,7 @@ class AsyncChatClient: api_key : typing.Optional[str] -- API key used for authenticating the client. If not provided, an `access_token` must be provided to authenticate. -- -- For more details, refer to the [Authentication Strategies Guide](/docs/introduction/api-key#authentication-strategies). 
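The connect methods above assemble the websocket URL from `httpx.QueryParams`, which is immutable: each `.add()` returns a new instance rather than mutating in place, hence the repeated reassignment. A standalone sketch of the pattern (the URL and parameter values are placeholders):

```python
import httpx

params = httpx.QueryParams()
for key, value in [
    ("access_token", "<token>"),
    ("config_id", "<config-id>"),
    ("verbose_transcription", "true"),
]:
    if value is not None:
        params = params.add(key, value)  # .add() returns a new QueryParams

# Illustrative base URL only; the SDK resolves it from its environment config.
ws_url = f"wss://api.hume.ai/v0/evi/chat?{params}"
```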
- session_settings : ConnectSessionSettings +- session_settings : typing.Optional[ConnectSessionSettings] ++ session_settings : ConnectSessionSettings -- allow_connection : typing.Optional[bool] -- Flag that allows the resulting Chat to accept secondary connections via -- the control plane `/connect` endpoint. Defaults to `False` on the server. -- Set to `True` to enable observer connections for the session. -- request_options : typing.Optional[RequestOptions] Request-specific configuration. - -@@ -277,6 +258,8 @@ class AsyncChatClient: - query_params = httpx.QueryParams() - if access_token is not None: - query_params = query_params.add("access_token", access_token) -+ if allow_connection is not None: -+ query_params = query_params.add("allow_connection", allow_connection) - if config_id is not None: - query_params = query_params.add("config_id", config_id) - if config_version is not None: -@@ -289,12 +272,18 @@ class AsyncChatClient: +@@ -265,12 +272,18 @@ class AsyncChatClient: query_params = query_params.add("verbose_transcription", verbose_transcription) if api_key is not None: query_params = query_params.add("api_key", api_key) -- if allow_connection is not None: -- query_params = query_params.add("allow_connection", str(allow_connection).lower()) +- - if session_settings is not None: - flattened_params = single_query_encoder("session_settings", session_settings) - for param_key, param_value in flattened_params: -- query_params = query_params.add(param_key, str(param_value)) +- query_params = query_params.add(param_key, param_value) +- + if ( + convert_and_respect_annotation_metadata( + object_=session_settings, annotation=ConnectSessionSettings, direction="write" @@ -245,238 +85,3 @@ index 2a3732f5..8ec7e4cd 100644 ws_url = ws_url + f"?{query_params}" headers = self._raw_client._client_wrapper.get_headers() if request_options and "additional_headers" in request_options: -@@ -315,234 +304,3 @@ class AsyncChatClient: - headers=dict(headers), - body="Unexpected error when initializing websocket connection.", - ) -- -- @deprecated("") -- async def _wrap_on_open_close( -- self, on_open: typing.Optional[OnOpenCloseHandlerType] -- ): -- if on_open is not None: -- if asyncio.iscoroutinefunction(on_open): -- await on_open() -- else: -- on_open() -- -- @deprecated("") -- async def _wrap_on_error( -- self, exc: Exception, on_error: typing.Optional[OnErrorHandlerType] -- ) -> None: -- if on_error is not None: -- if asyncio.iscoroutinefunction(on_error): -- await on_error(exc) -- else: -- on_error(exc) -- -- @deprecated("") -- async def _wrap_on_message( -- self, -- message: SubscribeEvent, -- on_message: typing.Optional[OnMessageHandlerType[SubscribeEvent]], -- ) -> None: -- if on_message is not None: -- if asyncio.iscoroutinefunction(on_message): -- await on_message(message) -- else: -- on_message(message) -- -- async def _process_connection( -- self, -- connection: AsyncChatSocketClient, -- on_message: typing.Optional[OnMessageHandlerType], -- on_error: typing.Optional[OnErrorHandlerType], -- ) -> None: -- async for message in connection: -- try: -- await self._wrap_on_message(message, on_message) -- except Exception as exc: -- await self._wrap_on_error(exc, on_error) -- -- def _construct_ws_uri(self, options: typing.Optional[ChatConnectOptions]): -- query_params = httpx.QueryParams() -- -- api_key = self._raw_client._client_wrapper.api_key -- if options is not None: -- maybe_api_key = options.get("api_key") -- if maybe_api_key is not None: -- api_key = maybe_api_key -- maybe_config_id = 
options.get("config_id") -- if maybe_config_id is not None: -- query_params = query_params.add("config_id", maybe_config_id) -- maybe_config_version = options.get("config_version") -- if maybe_config_version is not None: -- query_params = query_params.add( -- "config_version", maybe_config_version -- ) -- maybe_resumed_chat_group_id = options.get("resumed_chat_group_id") -- if maybe_resumed_chat_group_id is not None: -- query_params = query_params.add( -- "resumed_chat_group_id", maybe_resumed_chat_group_id -- ) -- maybe_verbose_transcription = options.get("verbose_transcription") -- if maybe_verbose_transcription is not None: -- query_params = query_params.add( -- "verbose_transcription", -- "true" if maybe_verbose_transcription else "false", -- ) -- elif api_key is not None: -- query_params = query_params.add("apiKey", api_key) -- -- maybe_voice_id = options.get("voice_id") -- if maybe_voice_id is not None: -- query_params = query_params.add("voice_id", maybe_voice_id) -- -- maybe_session_settings = options.get("session_settings") -- if maybe_session_settings is not None: -- # Handle audio settings -- audio = maybe_session_settings.get("audio") -- if audio is not None: -- channels = audio.get("channels") -- if channels is not None: -- query_params = query_params.add( -- "session_settings[audio][channels]", str(channels) -- ) -- encoding = audio.get("encoding") -- if encoding is not None: -- query_params = query_params.add( -- "session_settings[audio][encoding]", str(encoding) -- ) -- sample_rate = audio.get("sample_rate") -- if sample_rate is not None: -- query_params = query_params.add( -- "session_settings[audio][sample_rate]", str(sample_rate) -- ) -- -- # Handle context settings -- context = maybe_session_settings.get("context") -- if context is not None: -- text = context.get("text") -- if text is not None: -- query_params = query_params.add( -- "session_settings[context][text]", str(text) -- ) -- context_type = context.get("type") -- if context_type is not None: -- query_params = query_params.add( -- "session_settings[context][type]", str(context_type) -- ) -- -- # Handle top-level session settings -- custom_session_id = maybe_session_settings.get("custom_session_id") -- if custom_session_id is not None: -- query_params = query_params.add( -- "session_settings[custom_session_id]", str(custom_session_id) -- ) -- -- event_limit = maybe_session_settings.get("event_limit") -- if event_limit is not None: -- query_params = query_params.add( -- "session_settings[event_limit]", str(event_limit) -- ) -- -- language_model_api_key = maybe_session_settings.get("language_model_api_key") -- if language_model_api_key is not None: -- query_params = query_params.add( -- "session_settings[language_model_api_key]", str(language_model_api_key) -- ) -- -- system_prompt = maybe_session_settings.get("system_prompt") -- if system_prompt is not None: -- query_params = query_params.add( -- "session_settings[system_prompt]", str(system_prompt) -- ) -- -- variables = maybe_session_settings.get("variables") -- if variables is not None: -- query_params = query_params.add( -- "session_settings[variables]", json.dumps(variables) -- ) -- -- voice_id_setting = maybe_session_settings.get("voice_id") -- if voice_id_setting is not None: -- query_params = query_params.add( -- "session_settings[voice_id]", str(voice_id_setting) -- ) -- elif api_key is not None: -- query_params = query_params.add("apiKey", api_key) -- -- base = self._raw_client._client_wrapper.get_environment().evi + "/chat" -- return 
f"{base}?{query_params}" -- -- @deprecated("Use .on() instead.") -- @asynccontextmanager -- async def connect_with_callbacks( -- self, -- options: typing.Optional[ChatConnectOptions] = None, -- on_open: typing.Optional[OnOpenCloseHandlerType] = None, -- on_message: typing.Optional[OnMessageHandlerType[SubscribeEvent]] = None, -- on_close: typing.Optional[OnOpenCloseHandlerType] = None, -- on_error: typing.Optional[OnErrorHandlerType] = None, -- ) -> typing.AsyncIterator["AsyncChatSocketClient"]: -- """ -- Parameters -- ---------- -- on_open : Optional[OnOpenCloseHandlerType] -- A callable to be invoked on the opening of the websocket connection. -- -- on_message : Optional[OnMessageHandlerType[SubscribeEvent]] -- A callable to be invoked on receiving a message from the websocket connection. This callback should expect a `SubscribeEvent` object. -- -- on_close : Optional[OnOpenCloseHandlerType] -- A callable to be invoked on the closing of the websocket connection. -- -- on_error : Optional[OnErrorHandlerType] -- A callable to be invoked on receiving an error from the websocket connection. -- -- Yields -- ------- -- AsyncIterator["AsyncChatSocketClient"] -- """ -- -- ws_uri = self._construct_ws_uri(options) -- -- background_task: typing.Optional[asyncio.Task[None]] = None -- -- try: -- async with websockets.connect( -- ws_uri, -- extra_headers=self._raw_client._client_wrapper.get_headers(), -- ) as protocol: -- await self._wrap_on_open_close(on_open) -- connection = AsyncChatSocketClient(websocket=protocol) -- background_task = asyncio.create_task( -- self._process_connection(connection, on_message, on_error) -- ) -- -- yield connection -- -- # Special case authentication errors -- except websockets.exceptions.InvalidStatusCode as exc: -- status_code: int = exc.status_code -- if status_code == 401: -- raise ApiError( -- status_code=status_code, -- body="Websocket initialized with invalid credentials.", -- ) from exc -- raise ApiError( -- status_code=status_code, -- body="Unexpected error when initializing websocket connection.", -- ) from exc -- -- # Except all other errors to apply the on_error handler -- except Exception as exc: -- await self._wrap_on_error(exc, on_error) -- raise -- -- # Finally, apply the on_close handler -- finally: -- if background_task is not None: -- background_task.cancel() -- try: -- await background_task -- except asyncio.CancelledError: -- pass -- await self._wrap_on_open_close(on_close) -- diff --git a/src/hume/empathic_voice/chat/socket_client.py b/src/hume/empathic_voice/chat/socket_client.py index d5ea6658..9aaa1956 100644 --- a/src/hume/empathic_voice/chat/socket_client.py +++ b/src/hume/empathic_voice/chat/socket_client.py @@ -9,8 +9,6 @@ from typing_extensions import deprecated from contextlib import asynccontextmanager -from hume.empathic_voice.types.session_settings_message import SessionSettingsMessage - from ...core.events import EventEmitterMixin, EventType from ...core.pydantic_utilities import parse_obj_as from ..types.assistant_input import AssistantInput @@ -148,7 +146,7 @@ async def send_audio_input(self, message: AudioInput) -> None: await self.send_publish(message) @deprecated("Use send_publish instead.") - async def send_session_settings(self, message: SessionSettingsMessage) -> None: + async def send_session_settings(self, message: SessionSettings) -> None: await self.send_publish(message) @deprecated("Use send_publish instead.") @@ -240,7 +238,7 @@ def send_audio_input(self, message: AudioInput) -> None: self.send_publish(message) 
@deprecated("Use send_publish instead.") - def send_session_settings(self, message: SessionSettingsMessage) -> None: + def send_session_settings(self, message: SessionSettings) -> None: self.send_publish(message) @deprecated("Use send_publish instead.") diff --git a/src/hume/empathic_voice/chat/socket_client.py.diff b/src/hume/empathic_voice/chat/socket_client.py.diff index 77c80ea0..f1d2621b 100644 --- a/src/hume/empathic_voice/chat/socket_client.py.diff +++ b/src/hume/empathic_voice/chat/socket_client.py.diff @@ -1,13 +1,15 @@ diff --git a/src/hume/empathic_voice/chat/socket_client.py b/src/hume/empathic_voice/chat/socket_client.py -index de3b4a5e..18ee74ab 100644 +index d5ea6658..18ee74ab 100644 --- a/src/hume/empathic_voice/chat/socket_client.py +++ b/src/hume/empathic_voice/chat/socket_client.py -@@ -6,21 +6,10 @@ from json.decoder import JSONDecodeError +@@ -6,83 +6,18 @@ from json.decoder import JSONDecodeError import websockets import websockets.sync.connection as websockets_sync_connection -from typing_extensions import deprecated -from contextlib import asynccontextmanager +- +-from hume.empathic_voice.types.session_settings_message import SessionSettingsMessage - from ...core.events import EventEmitterMixin, EventType from ...core.pydantic_utilities import parse_obj_as @@ -25,10 +27,11 @@ index de3b4a5e..18ee74ab 100644 try: from websockets.legacy.client import WebSocketClientProtocol # type: ignore -@@ -29,58 +18,6 @@ except ImportError: - - ChatSocketClientResponse = typing.Union[SubscribeEvent] + except ImportError: + from websockets import WebSocketClientProtocol # type: ignore +-ChatSocketClientResponse = SubscribeEvent +- -class ChatConnectSessionSettingsAudio(typing.TypedDict, total=False): - channels: typing.Optional[int] - encoding: typing.Optional[str] @@ -70,7 +73,8 @@ index de3b4a5e..18ee74ab 100644 - resumed_chat_group_id: typing.Optional[str] - - verbose_transcription: typing.Optional[bool] -- ++ChatSocketClientResponse = typing.Union[SubscribeEvent] + - """ - ID of the Voice to use for this chat. If specified, will override the voice set in the Config - """ @@ -84,7 +88,7 @@ index de3b4a5e..18ee74ab 100644 class AsyncChatSocketClient(EventEmitterMixin): def __init__(self, *, websocket: WebSocketClientProtocol): -@@ -141,38 +78,6 @@ class AsyncChatSocketClient(EventEmitterMixin): +@@ -143,38 +78,6 @@ class AsyncChatSocketClient(EventEmitterMixin): """ await self._send(data.dict()) @@ -93,7 +97,7 @@ index de3b4a5e..18ee74ab 100644 - await self.send_publish(message) - - @deprecated("Use send_publish instead.") -- async def send_session_settings(self, message: SessionSettings) -> None: +- async def send_session_settings(self, message: SessionSettingsMessage) -> None: - await self.send_publish(message) - - @deprecated("Use send_publish instead.") @@ -123,7 +127,7 @@ index de3b4a5e..18ee74ab 100644 class ChatSocketClient(EventEmitterMixin): def __init__(self, *, websocket: websockets_sync_connection.Connection): -@@ -232,35 +137,3 @@ class ChatSocketClient(EventEmitterMixin): +@@ -234,35 +137,3 @@ class ChatSocketClient(EventEmitterMixin): Send a Pydantic model to the websocket connection. 
""" self._send(data.dict()) @@ -133,7 +137,7 @@ index de3b4a5e..18ee74ab 100644 - self.send_publish(message) - - @deprecated("Use send_publish instead.") -- def send_session_settings(self, message: SessionSettings) -> None: +- def send_session_settings(self, message: SessionSettingsMessage) -> None: - self.send_publish(message) - - @deprecated("Use send_publish instead.") diff --git a/src/hume/empathic_voice/chat/types/publish_event.py b/src/hume/empathic_voice/chat/types/publish_event.py index 3f3bfd0c..12c4e9a9 100644 --- a/src/hume/empathic_voice/chat/types/publish_event.py +++ b/src/hume/empathic_voice/chat/types/publish_event.py @@ -6,14 +6,14 @@ from ...types.audio_input import AudioInput from ...types.pause_assistant_message import PauseAssistantMessage from ...types.resume_assistant_message import ResumeAssistantMessage -from ...types.session_settings_message import SessionSettingsMessage +from ...types.session_settings import SessionSettings from ...types.tool_error_message import ToolErrorMessage from ...types.tool_response_message import ToolResponseMessage from ...types.user_input import UserInput PublishEvent = typing.Union[ AudioInput, - SessionSettingsMessage, + SessionSettings, UserInput, AssistantInput, ToolResponseMessage, diff --git a/src/hume/empathic_voice/client.py b/src/hume/empathic_voice/client.py index e9119462..4fba8feb 100644 --- a/src/hume/empathic_voice/client.py +++ b/src/hume/empathic_voice/client.py @@ -4,15 +4,15 @@ import typing -from hume.empathic_voice.chat.client import AsyncChatClient, ChatClient - from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper from .raw_client import AsyncRawEmpathicVoiceClient, RawEmpathicVoiceClient if typing.TYPE_CHECKING: + from .chat.client import AsyncChatClient, ChatClient from .chat_groups.client import AsyncChatGroupsClient, ChatGroupsClient from .chats.client import AsyncChatsClient, ChatsClient from .configs.client import AsyncConfigsClient, ConfigsClient + from .control_plane.client import AsyncControlPlaneClient, ControlPlaneClient from .prompts.client import AsyncPromptsClient, PromptsClient from .tools.client import AsyncToolsClient, ToolsClient @@ -21,11 +21,12 @@ class EmpathicVoiceClient: def __init__(self, *, client_wrapper: SyncClientWrapper): self._raw_client = RawEmpathicVoiceClient(client_wrapper=client_wrapper) self._client_wrapper = client_wrapper - self._tools: typing.Optional[ToolsClient] = None - self._prompts: typing.Optional[PromptsClient] = None - self._configs: typing.Optional[ConfigsClient] = None - self._chats: typing.Optional[ChatsClient] = None + self._control_plane: typing.Optional[ControlPlaneClient] = None self._chat_groups: typing.Optional[ChatGroupsClient] = None + self._chats: typing.Optional[ChatsClient] = None + self._configs: typing.Optional[ConfigsClient] = None + self._prompts: typing.Optional[PromptsClient] = None + self._tools: typing.Optional[ToolsClient] = None self._chat: typing.Optional[ChatClient] = None @property @@ -40,20 +41,28 @@ def with_raw_response(self) -> RawEmpathicVoiceClient: return self._raw_client @property - def tools(self): - if self._tools is None: - from .tools.client import ToolsClient # noqa: E402 + def control_plane(self): + if self._control_plane is None: + from .control_plane.client import ControlPlaneClient # noqa: E402 - self._tools = ToolsClient(client_wrapper=self._client_wrapper) - return self._tools + self._control_plane = ControlPlaneClient(client_wrapper=self._client_wrapper) + return self._control_plane @property - def 
prompts(self): - if self._prompts is None: - from .prompts.client import PromptsClient # noqa: E402 + def chat_groups(self): + if self._chat_groups is None: + from .chat_groups.client import ChatGroupsClient # noqa: E402 - self._prompts = PromptsClient(client_wrapper=self._client_wrapper) - return self._prompts + self._chat_groups = ChatGroupsClient(client_wrapper=self._client_wrapper) + return self._chat_groups + + @property + def chats(self): + if self._chats is None: + from .chats.client import ChatsClient # noqa: E402 + + self._chats = ChatsClient(client_wrapper=self._client_wrapper) + return self._chats @property def configs(self): @@ -64,31 +73,40 @@ def configs(self): return self._configs @property - def chats(self): - if self._chats is None: - from .chats.client import ChatsClient # noqa: E402 + def prompts(self): + if self._prompts is None: + from .prompts.client import PromptsClient # noqa: E402 - self._chats = ChatsClient(client_wrapper=self._client_wrapper) - return self._chats + self._prompts = PromptsClient(client_wrapper=self._client_wrapper) + return self._prompts @property - def chat_groups(self): - if self._chat_groups is None: - from .chat_groups.client import ChatGroupsClient # noqa: E402 + def tools(self): + if self._tools is None: + from .tools.client import ToolsClient # noqa: E402 - self._chat_groups = ChatGroupsClient(client_wrapper=self._client_wrapper) - return self._chat_groups + self._tools = ToolsClient(client_wrapper=self._client_wrapper) + return self._tools + + @property + def chat(self): + if self._chat is None: + from .chat.client import ChatClient # noqa: E402 + + self._chat = ChatClient(client_wrapper=self._client_wrapper) + return self._chat class AsyncEmpathicVoiceClient: def __init__(self, *, client_wrapper: AsyncClientWrapper): self._raw_client = AsyncRawEmpathicVoiceClient(client_wrapper=client_wrapper) self._client_wrapper = client_wrapper - self._tools: typing.Optional[AsyncToolsClient] = None - self._prompts: typing.Optional[AsyncPromptsClient] = None - self._configs: typing.Optional[AsyncConfigsClient] = None - self._chats: typing.Optional[AsyncChatsClient] = None + self._control_plane: typing.Optional[AsyncControlPlaneClient] = None self._chat_groups: typing.Optional[AsyncChatGroupsClient] = None + self._chats: typing.Optional[AsyncChatsClient] = None + self._configs: typing.Optional[AsyncConfigsClient] = None + self._prompts: typing.Optional[AsyncPromptsClient] = None + self._tools: typing.Optional[AsyncToolsClient] = None self._chat: typing.Optional[AsyncChatClient] = None @property @@ -103,20 +121,28 @@ def with_raw_response(self) -> AsyncRawEmpathicVoiceClient: return self._raw_client @property - def tools(self): - if self._tools is None: - from .tools.client import AsyncToolsClient # noqa: E402 + def control_plane(self): + if self._control_plane is None: + from .control_plane.client import AsyncControlPlaneClient # noqa: E402 - self._tools = AsyncToolsClient(client_wrapper=self._client_wrapper) - return self._tools + self._control_plane = AsyncControlPlaneClient(client_wrapper=self._client_wrapper) + return self._control_plane @property - def prompts(self): - if self._prompts is None: - from .prompts.client import AsyncPromptsClient # noqa: E402 + def chat_groups(self): + if self._chat_groups is None: + from .chat_groups.client import AsyncChatGroupsClient # noqa: E402 - self._prompts = AsyncPromptsClient(client_wrapper=self._client_wrapper) - return self._prompts + self._chat_groups = 
AsyncChatGroupsClient(client_wrapper=self._client_wrapper) + return self._chat_groups + + @property + def chats(self): + if self._chats is None: + from .chats.client import AsyncChatsClient # noqa: E402 + + self._chats = AsyncChatsClient(client_wrapper=self._client_wrapper) + return self._chats @property def configs(self): @@ -127,20 +153,20 @@ def configs(self): return self._configs @property - def chats(self): - if self._chats is None: - from .chats.client import AsyncChatsClient # noqa: E402 + def prompts(self): + if self._prompts is None: + from .prompts.client import AsyncPromptsClient # noqa: E402 - self._chats = AsyncChatsClient(client_wrapper=self._client_wrapper) - return self._chats + self._prompts = AsyncPromptsClient(client_wrapper=self._client_wrapper) + return self._prompts @property - def chat_groups(self): - if self._chat_groups is None: - from .chat_groups.client import AsyncChatGroupsClient # noqa: E402 + def tools(self): + if self._tools is None: + from .tools.client import AsyncToolsClient # noqa: E402 - self._chat_groups = AsyncChatGroupsClient(client_wrapper=self._client_wrapper) - return self._chat_groups + self._tools = AsyncToolsClient(client_wrapper=self._client_wrapper) + return self._tools @property def chat(self): diff --git a/src/hume/empathic_voice/client.py.diff b/src/hume/empathic_voice/client.py.diff index 84b74be1..f2b276f0 100644 --- a/src/hume/empathic_voice/client.py.diff +++ b/src/hume/empathic_voice/client.py.diff @@ -1,8 +1,8 @@ diff --git a/src/hume/empathic_voice/client.py b/src/hume/empathic_voice/client.py -index e9119462..241410a1 100644 +index e9119462..4fba8feb 100644 --- a/src/hume/empathic_voice/client.py +++ b/src/hume/empathic_voice/client.py -@@ -4,8 +4,6 @@ from __future__ import annotations +@@ -4,15 +4,15 @@ from __future__ import annotations import typing @@ -11,7 +11,8 @@ index e9119462..241410a1 100644 from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper from .raw_client import AsyncRawEmpathicVoiceClient, RawEmpathicVoiceClient -@@ -13,6 +11,7 @@ if typing.TYPE_CHECKING: + if typing.TYPE_CHECKING: ++ from .chat.client import AsyncChatClient, ChatClient from .chat_groups.client import AsyncChatGroupsClient, ChatGroupsClient from .chats.client import AsyncChatsClient, ChatsClient from .configs.client import AsyncConfigsClient, ConfigsClient @@ -19,7 +20,7 @@ index e9119462..241410a1 100644 from .prompts.client import AsyncPromptsClient, PromptsClient from .tools.client import AsyncToolsClient, ToolsClient -@@ -21,12 +20,12 @@ class EmpathicVoiceClient: +@@ -21,11 +21,12 @@ class EmpathicVoiceClient: def __init__(self, *, client_wrapper: SyncClientWrapper): self._raw_client = RawEmpathicVoiceClient(client_wrapper=client_wrapper) self._client_wrapper = client_wrapper @@ -29,15 +30,14 @@ index e9119462..241410a1 100644 - self._chats: typing.Optional[ChatsClient] = None + self._control_plane: typing.Optional[ControlPlaneClient] = None self._chat_groups: typing.Optional[ChatGroupsClient] = None -- self._chat: typing.Optional[ChatClient] = None + self._chats: typing.Optional[ChatsClient] = None + self._configs: typing.Optional[ConfigsClient] = None + self._prompts: typing.Optional[PromptsClient] = None + self._tools: typing.Optional[ToolsClient] = None + self._chat: typing.Optional[ChatClient] = None @property - def with_raw_response(self) -> RawEmpathicVoiceClient: -@@ -40,20 +39,28 @@ class EmpathicVoiceClient: +@@ -40,20 +41,28 @@ class EmpathicVoiceClient: return self._raw_client @property @@ -76,7 +76,7 @@ index 
e9119462..241410a1 100644 @property def configs(self): -@@ -64,32 +71,32 @@ class EmpathicVoiceClient: +@@ -64,31 +73,40 @@ class EmpathicVoiceClient: return self._configs @property @@ -104,6 +104,14 @@ index e9119462..241410a1 100644 - return self._chat_groups + self._tools = ToolsClient(client_wrapper=self._client_wrapper) + return self._tools ++ ++ @property ++ def chat(self): ++ if self._chat is None: ++ from .chat.client import ChatClient # noqa: E402 ++ ++ self._chat = ChatClient(client_wrapper=self._client_wrapper) ++ return self._chat class AsyncEmpathicVoiceClient: @@ -116,15 +124,14 @@ index e9119462..241410a1 100644 - self._chats: typing.Optional[AsyncChatsClient] = None + self._control_plane: typing.Optional[AsyncControlPlaneClient] = None self._chat_groups: typing.Optional[AsyncChatGroupsClient] = None -- self._chat: typing.Optional[AsyncChatClient] = None + self._chats: typing.Optional[AsyncChatsClient] = None + self._configs: typing.Optional[AsyncConfigsClient] = None + self._prompts: typing.Optional[AsyncPromptsClient] = None + self._tools: typing.Optional[AsyncToolsClient] = None + self._chat: typing.Optional[AsyncChatClient] = None @property - def with_raw_response(self) -> AsyncRawEmpathicVoiceClient: -@@ -103,28 +110,20 @@ class AsyncEmpathicVoiceClient: +@@ -103,20 +121,28 @@ class AsyncEmpathicVoiceClient: return self._raw_client @property @@ -144,58 +151,53 @@ index e9119462..241410a1 100644 - def prompts(self): - if self._prompts is None: - from .prompts.client import AsyncPromptsClient # noqa: E402 -- -- self._prompts = AsyncPromptsClient(client_wrapper=self._client_wrapper) -- return self._prompts -- -- @property -- def configs(self): -- if self._configs is None: -- from .configs.client import AsyncConfigsClient # noqa: E402 + def chat_groups(self): + if self._chat_groups is None: + from .chat_groups.client import AsyncChatGroupsClient # noqa: E402 -- self._configs = AsyncConfigsClient(client_wrapper=self._client_wrapper) -- return self._configs +- self._prompts = AsyncPromptsClient(client_wrapper=self._client_wrapper) +- return self._prompts + self._chat_groups = AsyncChatGroupsClient(client_wrapper=self._client_wrapper) + return self._chat_groups ++ ++ @property ++ def chats(self): ++ if self._chats is None: ++ from .chats.client import AsyncChatsClient # noqa: E402 ++ ++ self._chats = AsyncChatsClient(client_wrapper=self._client_wrapper) ++ return self._chats @property - def chats(self): -@@ -135,17 +134,25 @@ class AsyncEmpathicVoiceClient: - return self._chats - - @property -- def chat_groups(self): -- if self._chat_groups is None: -- from .chat_groups.client import AsyncChatGroupsClient # noqa: E402 -+ def configs(self): -+ if self._configs is None: -+ from .configs.client import AsyncConfigsClient # noqa: E402 - -- self._chat_groups = AsyncChatGroupsClient(client_wrapper=self._client_wrapper) -- return self._chat_groups -+ self._configs = AsyncConfigsClient(client_wrapper=self._client_wrapper) -+ return self._configs + def configs(self): +@@ -127,20 +153,20 @@ class AsyncEmpathicVoiceClient: + return self._configs @property -- def chat(self): -- if self._chat is None: -- from .chat.client import AsyncChatClient # noqa: E402 +- def chats(self): +- if self._chats is None: +- from .chats.client import AsyncChatsClient # noqa: E402 + def prompts(self): + if self._prompts is None: + from .prompts.client import AsyncPromptsClient # noqa: E402 -- self._chat = AsyncChatClient(client_wrapper=self._client_wrapper) -- return self._chat +- self._chats = 
AsyncChatsClient(client_wrapper=self._client_wrapper) +- return self._chats + self._prompts = AsyncPromptsClient(client_wrapper=self._client_wrapper) + return self._prompts -+ -+ @property + + @property +- def chat_groups(self): +- if self._chat_groups is None: +- from .chat_groups.client import AsyncChatGroupsClient # noqa: E402 + def tools(self): + if self._tools is None: + from .tools.client import AsyncToolsClient # noqa: E402 -+ + +- self._chat_groups = AsyncChatGroupsClient(client_wrapper=self._client_wrapper) +- return self._chat_groups + self._tools = AsyncToolsClient(client_wrapper=self._client_wrapper) + return self._tools + + @property + def chat(self): diff --git a/src/hume/empathic_voice/types/__init__.py b/src/hume/empathic_voice/types/__init__.py index 3e015320..531427e4 100644 --- a/src/hume/empathic_voice/types/__init__.py +++ b/src/hume/empathic_voice/types/__init__.py @@ -100,7 +100,6 @@ from .return_webhook_spec import ReturnWebhookSpec from .role import Role from .session_settings import SessionSettings - from .session_settings_message import SessionSettingsMessage from .session_settings_variables_value import SessionSettingsVariablesValue from .subscribe_event import SubscribeEvent from .tool import Tool @@ -218,7 +217,6 @@ "ReturnWebhookSpec": ".return_webhook_spec", "Role": ".role", "SessionSettings": ".session_settings", - "SessionSettingsMessage": ".session_settings_message", "SessionSettingsVariablesValue": ".session_settings_variables_value", "SubscribeEvent": ".subscribe_event", "Tool": ".tool", @@ -360,7 +358,6 @@ def __dir__(): "ReturnWebhookSpec", "Role", "SessionSettings", - "SessionSettingsMessage", "SessionSettingsVariablesValue", "SubscribeEvent", "Tool", diff --git a/src/hume/empathic_voice/types/json_message.py b/src/hume/empathic_voice/types/json_message.py index 3b1ce923..7806639b 100644 --- a/src/hume/empathic_voice/types/json_message.py +++ b/src/hume/empathic_voice/types/json_message.py @@ -6,7 +6,7 @@ from .assistant_message import AssistantMessage from .assistant_prosody import AssistantProsody from .chat_metadata import ChatMetadata -from .session_settings_message import SessionSettingsMessage +from .session_settings import SessionSettings from .tool_call_message import ToolCallMessage from .tool_error_message import ToolErrorMessage from .tool_response_message import ToolResponseMessage @@ -25,5 +25,5 @@ ToolCallMessage, ToolResponseMessage, ToolErrorMessage, - SessionSettingsMessage, + SessionSettings, ] diff --git a/src/hume/empathic_voice/types/session_settings_message.py b/src/hume/empathic_voice/types/session_settings_message.py deleted file mode 100644 index 4ff5960f..00000000 --- a/src/hume/empathic_voice/types/session_settings_message.py +++ /dev/null @@ -1,101 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -import pydantic -from ...core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel -from .audio_configuration import AudioConfiguration -from .builtin_tool_config import BuiltinToolConfig -from .context import Context -from .tool import Tool - - -class SessionSettingsMessage(UniversalBaseModel): - """ - Settings for this chat session. - """ - - audio: typing.Optional[AudioConfiguration] = pydantic.Field(default=None) - """ - Configuration details for the audio input used during the session. Ensures the audio is being correctly set up for processing. 
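The property churn above all follows one lazy sub-client pattern: defer the sub-module import until first attribute access, then cache the constructed client so repeated access is cheap. A self-contained sketch (class names here are stand-ins for the real lazily imported clients):

```python
import typing


class _ToolsClient:  # stand-in for the sub-client that would be lazily imported
    def __init__(self, *, client_wrapper: typing.Any) -> None:
        self.client_wrapper = client_wrapper


class ServiceClient:
    def __init__(self, *, client_wrapper: typing.Any) -> None:
        self._client_wrapper = client_wrapper
        self._tools: typing.Optional[_ToolsClient] = None

    @property
    def tools(self) -> _ToolsClient:
        if self._tools is None:
            # The real code defers `from .tools.client import ToolsClient` to
            # this point, avoiding import cycles at package import time.
            self._tools = _ToolsClient(client_wrapper=self._client_wrapper)
        return self._tools
```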
- - This optional field is only required when the audio input is encoded in PCM Linear 16 (16-bit, little-endian, signed PCM WAV data). For detailed instructions on how to configure session settings for PCM Linear 16 audio, please refer to the [Session Settings section](/docs/empathic-voice-interface-evi/configuration#session-settings) on the EVI Configuration page. - """ - - builtin_tools: typing.Optional[typing.List[BuiltinToolConfig]] = pydantic.Field(default=None) - """ - List of built-in tools to enable for the session. - - Tools are resources used by EVI to perform various tasks, such as searching the web or calling external APIs. Built-in tools, like web search, are natively integrated, while user-defined tools are created and invoked by the user. To learn more, see our [Tool Use Guide](/docs/empathic-voice-interface-evi/tool-use). - - Currently, the only built-in tool Hume provides is **Web Search**. When enabled, Web Search equips EVI with the ability to search the web for up-to-date information. - """ - - context: typing.Optional[Context] = pydantic.Field(default=None) - """ - Allows developers to inject additional context into the conversation, which is appended to the end of user messages for the session. - - When included in a Session Settings message, the provided context can be used to remind the LLM of its role in every user message, prevent it from forgetting important details, or add new relevant information to the conversation. - - Set to `null` to disable context injection. - """ - - custom_session_id: typing.Optional[str] = pydantic.Field(default=None) - """ - Unique identifier for the session. Used to manage conversational state, correlate frontend and backend data, and persist conversations across EVI sessions. - - If included, the response sent from Hume to your backend will include this ID. This allows you to correlate frontend users with their incoming messages. - - It is recommended to pass a `custom_session_id` if you are using a Custom Language Model. Please see our guide to [using a custom language model](/docs/empathic-voice-interface-evi/custom-language-model) with EVI to learn more. - """ - - language_model_api_key: typing.Optional[str] = pydantic.Field(default=None) - """ - Third party API key for the supplemental language model. - - When provided, EVI will use this key instead of Hume's API key for the supplemental LLM. This allows you to bypass rate limits and utilize your own API key as needed. - """ - - metadata: typing.Optional[typing.Dict[str, typing.Any]] = None - system_prompt: typing.Optional[str] = pydantic.Field(default=None) - """ - Instructions used to shape EVI's behavior, responses, and style for the session. - - When included in a Session Settings message, the provided Prompt overrides the existing one specified in the EVI configuration. If no Prompt was defined in the configuration, this Prompt will be the one used for the session. - - You can use the Prompt to define a specific goal or role for EVI, specifying how it should act or what it should focus on during the conversation. For example, EVI can be instructed to act as a customer support representative, a fitness coach, or a travel advisor, each with its own set of behaviors and response styles. - - For help writing a system prompt, see our [Prompting Guide](/docs/empathic-voice-interface-evi/prompting). - """ - - tools: typing.Optional[typing.List[Tool]] = pydantic.Field(default=None) - """ - List of user-defined tools to enable for the session. 
- - Tools are resources used by EVI to perform various tasks, such as searching the web or calling external APIs. Built-in tools, like web search, are natively integrated, while user-defined tools are created and invoked by the user. To learn more, see our [Tool Use Guide](/docs/empathic-voice-interface-evi/tool-use). - """ - - type: typing.Literal["session_settings"] = pydantic.Field(default="session_settings") - """ - The type of message sent through the socket; must be `session_settings` for our server to correctly identify and process it as a Session Settings message. - - Session settings are temporary and apply only to the current Chat session. These settings can be adjusted dynamically based on the requirements of each session to ensure optimal performance and user experience. - - For more information, please refer to the [Session Settings section](/docs/empathic-voice-interface-evi/configuration#session-settings) on the EVI Configuration page. - """ - - variables: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(default=None) - """ - Dynamic values that can be used to populate EVI prompts. - """ - - voice_id: typing.Optional[str] = None - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/hume/empathic_voice/types/subscribe_event.py b/src/hume/empathic_voice/types/subscribe_event.py index 927a57d9..ce63e139 100644 --- a/src/hume/empathic_voice/types/subscribe_event.py +++ b/src/hume/empathic_voice/types/subscribe_event.py @@ -7,7 +7,7 @@ from .assistant_prosody import AssistantProsody from .audio_output import AudioOutput from .chat_metadata import ChatMetadata -from .session_settings_message import SessionSettingsMessage +from .session_settings import SessionSettings from .tool_call_message import ToolCallMessage from .tool_error_message import ToolErrorMessage from .tool_response_message import ToolResponseMessage @@ -27,5 +27,5 @@ ToolCallMessage, ToolResponseMessage, ToolErrorMessage, - SessionSettingsMessage, + SessionSettings, ] diff --git a/src/hume/tts/client.py b/src/hume/tts/client.py index a427e2b6..a403e8f9 100644 --- a/src/hume/tts/client.py +++ b/src/hume/tts/client.py @@ -4,8 +4,7 @@ import typing -from hume.tts.stream_input.client import StreamInputClient - +from .. import core from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper from ..core.request_options import RequestOptions from .raw_client import AsyncRawTtsClient, RawTtsClient @@ -13,13 +12,14 @@ from .types.octave_version import OctaveVersion from .types.posted_context import PostedContext from .types.posted_utterance import PostedUtterance +from .types.posted_utterance_voice import PostedUtteranceVoice from .types.return_tts import ReturnTts from .types.timestamp_type import TimestampType from .types.tts_output import TtsOutput if typing.TYPE_CHECKING: - from .voices.client import AsyncVoicesClient, VoicesClient from .stream_input.client import AsyncStreamInputClient, StreamInputClient + from .voices.client import AsyncVoicesClient, VoicesClient # this is used as the default value for optional parameters OMIT = typing.cast(typing.Any, ...) @@ -75,10 +75,12 @@ def synthesize_json( Specifies the output audio file format. include_timestamp_types : typing.Optional[typing.Sequence[TimestampType]] - The set of timestamp types to include in the response. 
+ The set of timestamp types to include in the response. Only supported for Octave 2 requests. num_generations : typing.Optional[int] - Number of generations of the audio to produce. + Number of audio generations to produce from the input utterances. + + Using `num_generations` enables faster processing than issuing multiple sequential requests. Additionally, specifying `num_generations` allows prosody continuation across all generations without repeating context, ensuring each generation sounds slightly different while maintaining contextual consistency. split_utterances : typing.Optional[bool] Controls how audio output is segmented in the response. @@ -100,10 +102,6 @@ def synthesize_json( For a comparison of Octave versions, see the [Octave versions](/docs/text-to-speech-tts/overview#octave-versions) section in the TTS overview. instant_mode : typing.Optional[bool] - Enables ultra-low latency streaming, significantly reducing the time until the first audio chunk is received. Recommended for real-time applications requiring immediate audio playback. For further details, see our documentation on [instant mode](/docs/text-to-speech-tts/overview#ultra-low-latency-streaming-instant-mode). - - A [voice](/reference/text-to-speech-tts/synthesize-json-streaming#request.body.utterances.voice) must be specified when instant mode is enabled. Dynamic voice generation is not supported with this mode. - - Instant mode is only supported for streaming endpoints (e.g., [/v0/tts/stream/json](/reference/text-to-speech-tts/synthesize-json-streaming), [/v0/tts/stream/file](/reference/text-to-speech-tts/synthesize-file-streaming)). - - Ensure only a single generation is requested ([num_generations](/reference/text-to-speech-tts/synthesize-json-streaming#request.body.num_generations) must be `1` or omitted). request_options : typing.Optional[RequestOptions] Request-specific configuration. @@ -187,10 +185,12 @@ def synthesize_file( Specifies the output audio file format. include_timestamp_types : typing.Optional[typing.Sequence[TimestampType]] - The set of timestamp types to include in the response. + The set of timestamp types to include in the response. Only supported for Octave 2 requests. num_generations : typing.Optional[int] - Number of generations of the audio to produce. + Number of audio generations to produce from the input utterances. + + Using `num_generations` enables faster processing than issuing multiple sequential requests. Additionally, specifying `num_generations` allows prosody continuation across all generations without repeating context, ensuring each generation sounds slightly different while maintaining contextual consistency. split_utterances : typing.Optional[bool] Controls how audio output is segmented in the response. @@ -212,10 +212,6 @@ def synthesize_file( For a comparison of Octave versions, see the [Octave versions](/docs/text-to-speech-tts/overview#octave-versions) section in the TTS overview. instant_mode : typing.Optional[bool] - Enables ultra-low latency streaming, significantly reducing the time until the first audio chunk is received. Recommended for real-time applications requiring immediate audio playback. For further details, see our documentation on [instant mode](/docs/text-to-speech-tts/overview#ultra-low-latency-streaming-instant-mode). - - A [voice](/reference/text-to-speech-tts/synthesize-json-streaming#request.body.utterances.voice) must be specified when instant mode is enabled. Dynamic voice generation is not supported with this mode. 
- - Instant mode is only supported for streaming endpoints (e.g., [/v0/tts/stream/json](/reference/text-to-speech-tts/synthesize-json-streaming), [/v0/tts/stream/file](/reference/text-to-speech-tts/synthesize-file-streaming)). - - Ensure only a single generation is requested ([num_generations](/reference/text-to-speech-tts/synthesize-json-streaming#request.body.num_generations) must be `1` or omitted). request_options : typing.Optional[RequestOptions] Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response. @@ -292,10 +288,12 @@ def synthesize_file_streaming( Specifies the output audio file format. include_timestamp_types : typing.Optional[typing.Sequence[TimestampType]] - The set of timestamp types to include in the response. + The set of timestamp types to include in the response. Only supported for Octave 2 requests. num_generations : typing.Optional[int] - Number of generations of the audio to produce. + Number of audio generations to produce from the input utterances. + + Using `num_generations` enables faster processing than issuing multiple sequential requests. Additionally, specifying `num_generations` allows prosody continuation across all generations without repeating context, ensuring each generation sounds slightly different while maintaining contextual consistency. split_utterances : typing.Optional[bool] Controls how audio output is segmented in the response. @@ -317,10 +315,6 @@ def synthesize_file_streaming( For a comparison of Octave versions, see the [Octave versions](/docs/text-to-speech-tts/overview#octave-versions) section in the TTS overview. instant_mode : typing.Optional[bool] - Enables ultra-low latency streaming, significantly reducing the time until the first audio chunk is received. Recommended for real-time applications requiring immediate audio playback. For further details, see our documentation on [instant mode](/docs/text-to-speech-tts/overview#ultra-low-latency-streaming-instant-mode). - - A [voice](/reference/text-to-speech-tts/synthesize-json-streaming#request.body.utterances.voice) must be specified when instant mode is enabled. Dynamic voice generation is not supported with this mode. - - Instant mode is only supported for streaming endpoints (e.g., [/v0/tts/stream/json](/reference/text-to-speech-tts/synthesize-json-streaming), [/v0/tts/stream/file](/reference/text-to-speech-tts/synthesize-file-streaming)). - - Ensure only a single generation is requested ([num_generations](/reference/text-to-speech-tts/synthesize-json-streaming#request.body.num_generations) must be `1` or omitted). request_options : typing.Optional[RequestOptions] Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response. @@ -397,10 +391,12 @@ def synthesize_json_streaming( Specifies the output audio file format. include_timestamp_types : typing.Optional[typing.Sequence[TimestampType]] - The set of timestamp types to include in the response. + The set of timestamp types to include in the response. Only supported for Octave 2 requests. num_generations : typing.Optional[int] - Number of generations of the audio to produce. + Number of audio generations to produce from the input utterances. + + Using `num_generations` enables faster processing than issuing multiple sequential requests. 
Additionally, specifying `num_generations` allows prosody continuation across all generations without repeating context, ensuring each generation sounds slightly different while maintaining contextual consistency. split_utterances : typing.Optional[bool] Controls how audio output is segmented in the response. @@ -422,10 +418,6 @@ def synthesize_json_streaming( For a comparison of Octave versions, see the [Octave versions](/docs/text-to-speech-tts/overview#octave-versions) section in the TTS overview. instant_mode : typing.Optional[bool] - Enables ultra-low latency streaming, significantly reducing the time until the first audio chunk is received. Recommended for real-time applications requiring immediate audio playback. For further details, see our documentation on [instant mode](/docs/text-to-speech-tts/overview#ultra-low-latency-streaming-instant-mode). - - A [voice](/reference/text-to-speech-tts/synthesize-json-streaming#request.body.utterances.voice) must be specified when instant mode is enabled. Dynamic voice generation is not supported with this mode. - - Instant mode is only supported for streaming endpoints (e.g., [/v0/tts/stream/json](/reference/text-to-speech-tts/synthesize-json-streaming), [/v0/tts/stream/file](/reference/text-to-speech-tts/synthesize-file-streaming)). - - Ensure only a single generation is requested ([num_generations](/reference/text-to-speech-tts/synthesize-json-streaming#request.body.num_generations) must be `1` or omitted). request_options : typing.Optional[RequestOptions] Request-specific configuration. @@ -471,6 +463,117 @@ def synthesize_json_streaming( ) as r: yield from r.data + def convert_voice_file( + self, + *, + audio: core.File, + strip_headers: typing.Optional[bool] = OMIT, + context: typing.Optional[PostedContext] = OMIT, + voice: typing.Optional[PostedUtteranceVoice] = OMIT, + format: typing.Optional[Format] = OMIT, + include_timestamp_types: typing.Optional[typing.List[TimestampType]] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Iterator[bytes]: + """ + Parameters + ---------- + audio : core.File + See core.File for more documentation + + strip_headers : typing.Optional[bool] + If enabled, the audio for all the chunks of a generation, once concatenated together, will constitute a single audio file. Otherwise, if disabled, each chunk's audio will be its own audio file, each with its own headers (if applicable). + + context : typing.Optional[PostedContext] + Utterances to use as context for generating consistent speech style and prosody across multiple requests. These will not be converted to speech output. + + voice : typing.Optional[PostedUtteranceVoice] + + format : typing.Optional[Format] + Specifies the output audio file format. + + include_timestamp_types : typing.Optional[typing.List[TimestampType]] + The set of timestamp types to include in the response. When used in multipart/form-data, specify each value using bracket notation: `include_timestamp_types[0]=word&include_timestamp_types[1]=phoneme`. Only supported for Octave 2 requests. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response. 
+
+        Returns
+        -------
+        typing.Iterator[bytes]
+            Successful Response
+        """
+        with self._raw_client.convert_voice_file(
+            audio=audio,
+            strip_headers=strip_headers,
+            context=context,
+            voice=voice,
+            format=format,
+            include_timestamp_types=include_timestamp_types,
+            request_options=request_options,
+        ) as r:
+            yield from r.data
+
+    def convert_voice_json(
+        self,
+        *,
+        strip_headers: typing.Optional[bool] = OMIT,
+        audio: typing.Optional[core.File] = OMIT,
+        context: typing.Optional[PostedContext] = OMIT,
+        voice: typing.Optional[PostedUtteranceVoice] = OMIT,
+        format: typing.Optional[Format] = OMIT,
+        include_timestamp_types: typing.Optional[typing.List[TimestampType]] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> typing.Iterator[TtsOutput]:
+        """
+        Parameters
+        ----------
+        strip_headers : typing.Optional[bool]
+            If enabled, the audio for all the chunks of a generation, once concatenated together, will constitute a single audio file. Otherwise, if disabled, each chunk's audio will be its own audio file, each with its own headers (if applicable).
+
+        audio : typing.Optional[core.File]
+            See core.File for more documentation
+
+        context : typing.Optional[PostedContext]
+            Utterances to use as context for generating consistent speech style and prosody across multiple requests. These will not be converted to speech output.
+
+        voice : typing.Optional[PostedUtteranceVoice]
+
+        format : typing.Optional[Format]
+            Specifies the output audio file format.
+
+        include_timestamp_types : typing.Optional[typing.List[TimestampType]]
+            The set of timestamp types to include in the response. When used in multipart/form-data, specify each value using bracket notation: `include_timestamp_types[0]=word&include_timestamp_types[1]=phoneme`. Only supported for Octave 2 requests.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Yields
+        ------
+        typing.Iterator[TtsOutput]
+            Successful Response
+
+        Examples
+        --------
+        from hume import HumeClient
+
+        client = HumeClient(
+            api_key="YOUR_API_KEY",
+        )
+        response = client.tts.convert_voice_json()
+        for chunk in response:
+            print(chunk)
+        """
+        with self._raw_client.convert_voice_json(
+            strip_headers=strip_headers,
+            audio=audio,
+            context=context,
+            voice=voice,
+            format=format,
+            include_timestamp_types=include_timestamp_types,
+            request_options=request_options,
+        ) as r:
+            yield from r.data
+
     @property
     def voices(self):
         if self._voices is None:
@@ -539,10 +642,12 @@ async def synthesize_json(
             Specifies the output audio file format.

         include_timestamp_types : typing.Optional[typing.Sequence[TimestampType]]
-            The set of timestamp types to include in the response.
+            The set of timestamp types to include in the response. Only supported for Octave 2 requests.

         num_generations : typing.Optional[int]
-            Number of generations of the audio to produce.
+            Number of audio generations to produce from the input utterances.
+
+            Using `num_generations` enables faster processing than issuing multiple sequential requests. Additionally, specifying `num_generations` allows prosody continuation across all generations without repeating context, ensuring each generation sounds slightly different while maintaining contextual consistency.

         split_utterances : typing.Optional[bool]
             Controls how audio output is segmented in the response.
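The synchronous `convert_voice_file` added above yields raw audio bytes, so it pairs naturally with a streaming write. A minimal consumption sketch, assuming a local `input.wav` and the usual Fern convention that `core.File` accepts an open binary file object; a `PostedUtteranceVoice` could also be passed via `voice=` to select the target voice:

    from hume import HumeClient

    client = HumeClient(api_key="YOUR_API_KEY")

    # Stream the converted audio to disk as chunks arrive; strip_headers=True
    # makes the concatenated chunks form a single playable file.
    with open("input.wav", "rb") as source, open("converted.wav", "wb") as sink:
        for chunk in client.tts.convert_voice_file(audio=source, strip_headers=True):
            sink.write(chunk)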
@@ -564,10 +669,6 @@ async def synthesize_json( For a comparison of Octave versions, see the [Octave versions](/docs/text-to-speech-tts/overview#octave-versions) section in the TTS overview. instant_mode : typing.Optional[bool] - Enables ultra-low latency streaming, significantly reducing the time until the first audio chunk is received. Recommended for real-time applications requiring immediate audio playback. For further details, see our documentation on [instant mode](/docs/text-to-speech-tts/overview#ultra-low-latency-streaming-instant-mode). - - A [voice](/reference/text-to-speech-tts/synthesize-json-streaming#request.body.utterances.voice) must be specified when instant mode is enabled. Dynamic voice generation is not supported with this mode. - - Instant mode is only supported for streaming endpoints (e.g., [/v0/tts/stream/json](/reference/text-to-speech-tts/synthesize-json-streaming), [/v0/tts/stream/file](/reference/text-to-speech-tts/synthesize-file-streaming)). - - Ensure only a single generation is requested ([num_generations](/reference/text-to-speech-tts/synthesize-json-streaming#request.body.num_generations) must be `1` or omitted). request_options : typing.Optional[RequestOptions] Request-specific configuration. @@ -659,10 +760,12 @@ async def synthesize_file( Specifies the output audio file format. include_timestamp_types : typing.Optional[typing.Sequence[TimestampType]] - The set of timestamp types to include in the response. + The set of timestamp types to include in the response. Only supported for Octave 2 requests. num_generations : typing.Optional[int] - Number of generations of the audio to produce. + Number of audio generations to produce from the input utterances. + + Using `num_generations` enables faster processing than issuing multiple sequential requests. Additionally, specifying `num_generations` allows prosody continuation across all generations without repeating context, ensuring each generation sounds slightly different while maintaining contextual consistency. split_utterances : typing.Optional[bool] Controls how audio output is segmented in the response. @@ -684,10 +787,6 @@ async def synthesize_file( For a comparison of Octave versions, see the [Octave versions](/docs/text-to-speech-tts/overview#octave-versions) section in the TTS overview. instant_mode : typing.Optional[bool] - Enables ultra-low latency streaming, significantly reducing the time until the first audio chunk is received. Recommended for real-time applications requiring immediate audio playback. For further details, see our documentation on [instant mode](/docs/text-to-speech-tts/overview#ultra-low-latency-streaming-instant-mode). - - A [voice](/reference/text-to-speech-tts/synthesize-json-streaming#request.body.utterances.voice) must be specified when instant mode is enabled. Dynamic voice generation is not supported with this mode. - - Instant mode is only supported for streaming endpoints (e.g., [/v0/tts/stream/json](/reference/text-to-speech-tts/synthesize-json-streaming), [/v0/tts/stream/file](/reference/text-to-speech-tts/synthesize-file-streaming)). - - Ensure only a single generation is requested ([num_generations](/reference/text-to-speech-tts/synthesize-json-streaming#request.body.num_generations) must be `1` or omitted). request_options : typing.Optional[RequestOptions] Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response. 
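The expanded `num_generations` description is easiest to see in use. A sketch of requesting several takes in one call, assuming the `ReturnTts` shape documented elsewhere in the SDK (a `generations` list whose entries expose `generation_id` and `duration`):

    from hume import HumeClient
    from hume.tts import PostedUtterance

    client = HumeClient(api_key="YOUR_API_KEY")

    # One request, three takes: the generations share prosody context but each
    # sounds slightly different, and this is faster than three sequential calls.
    result = client.tts.synthesize_json(
        utterances=[PostedUtterance(text="Your order has shipped.")],
        num_generations=3,
    )
    for generation in result.generations:
        print(generation.generation_id, generation.duration)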
@@ -773,10 +872,12 @@ async def synthesize_file_streaming( Specifies the output audio file format. include_timestamp_types : typing.Optional[typing.Sequence[TimestampType]] - The set of timestamp types to include in the response. + The set of timestamp types to include in the response. Only supported for Octave 2 requests. num_generations : typing.Optional[int] - Number of generations of the audio to produce. + Number of audio generations to produce from the input utterances. + + Using `num_generations` enables faster processing than issuing multiple sequential requests. Additionally, specifying `num_generations` allows prosody continuation across all generations without repeating context, ensuring each generation sounds slightly different while maintaining contextual consistency. split_utterances : typing.Optional[bool] Controls how audio output is segmented in the response. @@ -798,10 +899,6 @@ async def synthesize_file_streaming( For a comparison of Octave versions, see the [Octave versions](/docs/text-to-speech-tts/overview#octave-versions) section in the TTS overview. instant_mode : typing.Optional[bool] - Enables ultra-low latency streaming, significantly reducing the time until the first audio chunk is received. Recommended for real-time applications requiring immediate audio playback. For further details, see our documentation on [instant mode](/docs/text-to-speech-tts/overview#ultra-low-latency-streaming-instant-mode). - - A [voice](/reference/text-to-speech-tts/synthesize-json-streaming#request.body.utterances.voice) must be specified when instant mode is enabled. Dynamic voice generation is not supported with this mode. - - Instant mode is only supported for streaming endpoints (e.g., [/v0/tts/stream/json](/reference/text-to-speech-tts/synthesize-json-streaming), [/v0/tts/stream/file](/reference/text-to-speech-tts/synthesize-file-streaming)). - - Ensure only a single generation is requested ([num_generations](/reference/text-to-speech-tts/synthesize-json-streaming#request.body.num_generations) must be `1` or omitted). request_options : typing.Optional[RequestOptions] Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response. @@ -887,10 +984,12 @@ async def synthesize_json_streaming( Specifies the output audio file format. include_timestamp_types : typing.Optional[typing.Sequence[TimestampType]] - The set of timestamp types to include in the response. + The set of timestamp types to include in the response. Only supported for Octave 2 requests. num_generations : typing.Optional[int] - Number of generations of the audio to produce. + Number of audio generations to produce from the input utterances. + + Using `num_generations` enables faster processing than issuing multiple sequential requests. Additionally, specifying `num_generations` allows prosody continuation across all generations without repeating context, ensuring each generation sounds slightly different while maintaining contextual consistency. split_utterances : typing.Optional[bool] Controls how audio output is segmented in the response. @@ -912,10 +1011,6 @@ async def synthesize_json_streaming( For a comparison of Octave versions, see the [Octave versions](/docs/text-to-speech-tts/overview#octave-versions) section in the TTS overview. instant_mode : typing.Optional[bool] - Enables ultra-low latency streaming, significantly reducing the time until the first audio chunk is received. 
Recommended for real-time applications requiring immediate audio playback. For further details, see our documentation on [instant mode](/docs/text-to-speech-tts/overview#ultra-low-latency-streaming-instant-mode). - - A [voice](/reference/text-to-speech-tts/synthesize-json-streaming#request.body.utterances.voice) must be specified when instant mode is enabled. Dynamic voice generation is not supported with this mode. - - Instant mode is only supported for streaming endpoints (e.g., [/v0/tts/stream/json](/reference/text-to-speech-tts/synthesize-json-streaming), [/v0/tts/stream/file](/reference/text-to-speech-tts/synthesize-file-streaming)). - - Ensure only a single generation is requested ([num_generations](/reference/text-to-speech-tts/synthesize-json-streaming#request.body.num_generations) must be `1` or omitted). request_options : typing.Optional[RequestOptions] Request-specific configuration. @@ -970,6 +1065,127 @@ async def main() -> None: async for _chunk in r.data: yield _chunk + async def convert_voice_file( + self, + *, + audio: core.File, + strip_headers: typing.Optional[bool] = OMIT, + context: typing.Optional[PostedContext] = OMIT, + voice: typing.Optional[PostedUtteranceVoice] = OMIT, + format: typing.Optional[Format] = OMIT, + include_timestamp_types: typing.Optional[typing.List[TimestampType]] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.AsyncIterator[bytes]: + """ + Parameters + ---------- + audio : core.File + See core.File for more documentation + + strip_headers : typing.Optional[bool] + If enabled, the audio for all the chunks of a generation, once concatenated together, will constitute a single audio file. Otherwise, if disabled, each chunk's audio will be its own audio file, each with its own headers (if applicable). + + context : typing.Optional[PostedContext] + Utterances to use as context for generating consistent speech style and prosody across multiple requests. These will not be converted to speech output. + + voice : typing.Optional[PostedUtteranceVoice] + + format : typing.Optional[Format] + Specifies the output audio file format. + + include_timestamp_types : typing.Optional[typing.List[TimestampType]] + The set of timestamp types to include in the response. When used in multipart/form-data, specify each value using bracket notation: `include_timestamp_types[0]=word&include_timestamp_types[1]=phoneme`. Only supported for Octave 2 requests. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response. 
+ + Returns + ------- + typing.AsyncIterator[bytes] + Successful Response + """ + async with self._raw_client.convert_voice_file( + audio=audio, + strip_headers=strip_headers, + context=context, + voice=voice, + format=format, + include_timestamp_types=include_timestamp_types, + request_options=request_options, + ) as r: + async for _chunk in r.data: + yield _chunk + + async def convert_voice_json( + self, + *, + strip_headers: typing.Optional[bool] = OMIT, + audio: typing.Optional[core.File] = OMIT, + context: typing.Optional[PostedContext] = OMIT, + voice: typing.Optional[PostedUtteranceVoice] = OMIT, + format: typing.Optional[Format] = OMIT, + include_timestamp_types: typing.Optional[typing.List[TimestampType]] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.AsyncIterator[TtsOutput]: + """ + Parameters + ---------- + strip_headers : typing.Optional[bool] + If enabled, the audio for all the chunks of a generation, once concatenated together, will constitute a single audio file. Otherwise, if disabled, each chunk's audio will be its own audio file, each with its own headers (if applicable). + + audio : typing.Optional[core.File] + See core.File for more documentation + + context : typing.Optional[PostedContext] + Utterances to use as context for generating consistent speech style and prosody across multiple requests. These will not be converted to speech output. + + voice : typing.Optional[PostedUtteranceVoice] + + format : typing.Optional[Format] + Specifies the output audio file format. + + include_timestamp_types : typing.Optional[typing.List[TimestampType]] + The set of timestamp types to include in the response. When used in multipart/form-data, specify each value using bracket notation: `include_timestamp_types[0]=word&include_timestamp_types[1]=phoneme`. Only supported for Octave 2 requests. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Yields + ------ + typing.AsyncIterator[TtsOutput] + Successful Response + + Examples + -------- + import asyncio + + from hume import AsyncHumeClient + + client = AsyncHumeClient( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + response = await client.tts.convert_voice_json() + async for chunk in response: + yield chunk + + + asyncio.run(main()) + """ + async with self._raw_client.convert_voice_json( + strip_headers=strip_headers, + audio=audio, + context=context, + voice=voice, + format=format, + include_timestamp_types=include_timestamp_types, + request_options=request_options, + ) as r: + async for _chunk in r.data: + yield _chunk + @property def voices(self): if self._voices is None: @@ -981,9 +1197,7 @@ def voices(self): @property def stream_input(self): if self._stream_input is None: - from .stream_input.client import AsyncStreamInputClient + from .stream_input.client import AsyncStreamInputClient # noqa: E402 - self._stream_input = AsyncStreamInputClient( - client_wrapper=self._client_wrapper, - ) + self._stream_input = AsyncStreamInputClient(client_wrapper=self._client_wrapper) return self._stream_input diff --git a/src/hume/tts/client.py.diff b/src/hume/tts/client.py.diff deleted file mode 100644 index d4b435bc..00000000 --- a/src/hume/tts/client.py.diff +++ /dev/null @@ -1,527 +0,0 @@ -diff --git a/src/hume/tts/client.py b/src/hume/tts/client.py -index a427e2b6..fdff3be9 100644 ---- a/src/hume/tts/client.py -+++ b/src/hume/tts/client.py -@@ -4,8 +4,7 @@ from __future__ import annotations - - import typing - --from hume.tts.stream_input.client import StreamInputClient -- -+from .. import core - from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper - from ..core.request_options import RequestOptions - from .raw_client import AsyncRawTtsClient, RawTtsClient -@@ -13,13 +12,13 @@ from .types.format import Format - from .types.octave_version import OctaveVersion - from .types.posted_context import PostedContext - from .types.posted_utterance import PostedUtterance -+from .types.posted_utterance_voice import PostedUtteranceVoice - from .types.return_tts import ReturnTts - from .types.timestamp_type import TimestampType - from .types.tts_output import TtsOutput - - if typing.TYPE_CHECKING: - from .voices.client import AsyncVoicesClient, VoicesClient -- from .stream_input.client import AsyncStreamInputClient, StreamInputClient - # this is used as the default value for optional parameters - OMIT = typing.cast(typing.Any, ...) - -@@ -29,7 +28,6 @@ class TtsClient: - self._raw_client = RawTtsClient(client_wrapper=client_wrapper) - self._client_wrapper = client_wrapper - self._voices: typing.Optional[VoicesClient] = None -- self._stream_input: typing.Optional[StreamInputClient] = None - - @property - def with_raw_response(self) -> RawTtsClient: -@@ -75,10 +73,12 @@ class TtsClient: - Specifies the output audio file format. - - include_timestamp_types : typing.Optional[typing.Sequence[TimestampType]] -- The set of timestamp types to include in the response. -+ The set of timestamp types to include in the response. Only supported for Octave 2 requests. - - num_generations : typing.Optional[int] -- Number of generations of the audio to produce. -+ Number of audio generations to produce from the input utterances. -+ -+ Using `num_generations` enables faster processing than issuing multiple sequential requests. 
Additionally, specifying `num_generations` allows prosody continuation across all generations without repeating context, ensuring each generation sounds slightly different while maintaining contextual consistency. - - split_utterances : typing.Optional[bool] - Controls how audio output is segmented in the response. -@@ -100,10 +100,6 @@ class TtsClient: - For a comparison of Octave versions, see the [Octave versions](/docs/text-to-speech-tts/overview#octave-versions) section in the TTS overview. - - instant_mode : typing.Optional[bool] -- Enables ultra-low latency streaming, significantly reducing the time until the first audio chunk is received. Recommended for real-time applications requiring immediate audio playback. For further details, see our documentation on [instant mode](/docs/text-to-speech-tts/overview#ultra-low-latency-streaming-instant-mode). -- - A [voice](/reference/text-to-speech-tts/synthesize-json-streaming#request.body.utterances.voice) must be specified when instant mode is enabled. Dynamic voice generation is not supported with this mode. -- - Instant mode is only supported for streaming endpoints (e.g., [/v0/tts/stream/json](/reference/text-to-speech-tts/synthesize-json-streaming), [/v0/tts/stream/file](/reference/text-to-speech-tts/synthesize-file-streaming)). -- - Ensure only a single generation is requested ([num_generations](/reference/text-to-speech-tts/synthesize-json-streaming#request.body.num_generations) must be `1` or omitted). - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. -@@ -187,10 +183,12 @@ class TtsClient: - Specifies the output audio file format. - - include_timestamp_types : typing.Optional[typing.Sequence[TimestampType]] -- The set of timestamp types to include in the response. -+ The set of timestamp types to include in the response. Only supported for Octave 2 requests. - - num_generations : typing.Optional[int] -- Number of generations of the audio to produce. -+ Number of audio generations to produce from the input utterances. -+ -+ Using `num_generations` enables faster processing than issuing multiple sequential requests. Additionally, specifying `num_generations` allows prosody continuation across all generations without repeating context, ensuring each generation sounds slightly different while maintaining contextual consistency. - - split_utterances : typing.Optional[bool] - Controls how audio output is segmented in the response. -@@ -212,10 +210,6 @@ class TtsClient: - For a comparison of Octave versions, see the [Octave versions](/docs/text-to-speech-tts/overview#octave-versions) section in the TTS overview. - - instant_mode : typing.Optional[bool] -- Enables ultra-low latency streaming, significantly reducing the time until the first audio chunk is received. Recommended for real-time applications requiring immediate audio playback. For further details, see our documentation on [instant mode](/docs/text-to-speech-tts/overview#ultra-low-latency-streaming-instant-mode). -- - A [voice](/reference/text-to-speech-tts/synthesize-json-streaming#request.body.utterances.voice) must be specified when instant mode is enabled. Dynamic voice generation is not supported with this mode. -- - Instant mode is only supported for streaming endpoints (e.g., [/v0/tts/stream/json](/reference/text-to-speech-tts/synthesize-json-streaming), [/v0/tts/stream/file](/reference/text-to-speech-tts/synthesize-file-streaming)). 
-- - Ensure only a single generation is requested ([num_generations](/reference/text-to-speech-tts/synthesize-json-streaming#request.body.num_generations) must be `1` or omitted). - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response. -@@ -292,10 +286,12 @@ class TtsClient: - Specifies the output audio file format. - - include_timestamp_types : typing.Optional[typing.Sequence[TimestampType]] -- The set of timestamp types to include in the response. -+ The set of timestamp types to include in the response. Only supported for Octave 2 requests. - - num_generations : typing.Optional[int] -- Number of generations of the audio to produce. -+ Number of audio generations to produce from the input utterances. -+ -+ Using `num_generations` enables faster processing than issuing multiple sequential requests. Additionally, specifying `num_generations` allows prosody continuation across all generations without repeating context, ensuring each generation sounds slightly different while maintaining contextual consistency. - - split_utterances : typing.Optional[bool] - Controls how audio output is segmented in the response. -@@ -317,10 +313,6 @@ class TtsClient: - For a comparison of Octave versions, see the [Octave versions](/docs/text-to-speech-tts/overview#octave-versions) section in the TTS overview. - - instant_mode : typing.Optional[bool] -- Enables ultra-low latency streaming, significantly reducing the time until the first audio chunk is received. Recommended for real-time applications requiring immediate audio playback. For further details, see our documentation on [instant mode](/docs/text-to-speech-tts/overview#ultra-low-latency-streaming-instant-mode). -- - A [voice](/reference/text-to-speech-tts/synthesize-json-streaming#request.body.utterances.voice) must be specified when instant mode is enabled. Dynamic voice generation is not supported with this mode. -- - Instant mode is only supported for streaming endpoints (e.g., [/v0/tts/stream/json](/reference/text-to-speech-tts/synthesize-json-streaming), [/v0/tts/stream/file](/reference/text-to-speech-tts/synthesize-file-streaming)). -- - Ensure only a single generation is requested ([num_generations](/reference/text-to-speech-tts/synthesize-json-streaming#request.body.num_generations) must be `1` or omitted). - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response. -@@ -397,10 +389,12 @@ class TtsClient: - Specifies the output audio file format. - - include_timestamp_types : typing.Optional[typing.Sequence[TimestampType]] -- The set of timestamp types to include in the response. -+ The set of timestamp types to include in the response. Only supported for Octave 2 requests. - - num_generations : typing.Optional[int] -- Number of generations of the audio to produce. -+ Number of audio generations to produce from the input utterances. -+ -+ Using `num_generations` enables faster processing than issuing multiple sequential requests. Additionally, specifying `num_generations` allows prosody continuation across all generations without repeating context, ensuring each generation sounds slightly different while maintaining contextual consistency. - - split_utterances : typing.Optional[bool] - Controls how audio output is segmented in the response. 
-@@ -422,10 +416,6 @@ class TtsClient: - For a comparison of Octave versions, see the [Octave versions](/docs/text-to-speech-tts/overview#octave-versions) section in the TTS overview. - - instant_mode : typing.Optional[bool] -- Enables ultra-low latency streaming, significantly reducing the time until the first audio chunk is received. Recommended for real-time applications requiring immediate audio playback. For further details, see our documentation on [instant mode](/docs/text-to-speech-tts/overview#ultra-low-latency-streaming-instant-mode). -- - A [voice](/reference/text-to-speech-tts/synthesize-json-streaming#request.body.utterances.voice) must be specified when instant mode is enabled. Dynamic voice generation is not supported with this mode. -- - Instant mode is only supported for streaming endpoints (e.g., [/v0/tts/stream/json](/reference/text-to-speech-tts/synthesize-json-streaming), [/v0/tts/stream/file](/reference/text-to-speech-tts/synthesize-file-streaming)). -- - Ensure only a single generation is requested ([num_generations](/reference/text-to-speech-tts/synthesize-json-streaming#request.body.num_generations) must be `1` or omitted). - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. -@@ -471,6 +461,117 @@ class TtsClient: - ) as r: - yield from r.data - -+ def convert_voice_file( -+ self, -+ *, -+ audio: core.File, -+ strip_headers: typing.Optional[bool] = OMIT, -+ context: typing.Optional[PostedContext] = OMIT, -+ voice: typing.Optional[PostedUtteranceVoice] = OMIT, -+ format: typing.Optional[Format] = OMIT, -+ include_timestamp_types: typing.Optional[typing.List[TimestampType]] = OMIT, -+ request_options: typing.Optional[RequestOptions] = None, -+ ) -> typing.Iterator[bytes]: -+ """ -+ Parameters -+ ---------- -+ audio : core.File -+ See core.File for more documentation -+ -+ strip_headers : typing.Optional[bool] -+ If enabled, the audio for all the chunks of a generation, once concatenated together, will constitute a single audio file. Otherwise, if disabled, each chunk's audio will be its own audio file, each with its own headers (if applicable). -+ -+ context : typing.Optional[PostedContext] -+ Utterances to use as context for generating consistent speech style and prosody across multiple requests. These will not be converted to speech output. -+ -+ voice : typing.Optional[PostedUtteranceVoice] -+ -+ format : typing.Optional[Format] -+ Specifies the output audio file format. -+ -+ include_timestamp_types : typing.Optional[typing.List[TimestampType]] -+ The set of timestamp types to include in the response. When used in multipart/form-data, specify each value using bracket notation: `include_timestamp_types[0]=word&include_timestamp_types[1]=phoneme`. Only supported for Octave 2 requests. -+ -+ request_options : typing.Optional[RequestOptions] -+ Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response. 
-+ -+ Returns -+ ------- -+ typing.Iterator[bytes] -+ Successful Response -+ """ -+ with self._raw_client.convert_voice_file( -+ audio=audio, -+ strip_headers=strip_headers, -+ context=context, -+ voice=voice, -+ format=format, -+ include_timestamp_types=include_timestamp_types, -+ request_options=request_options, -+ ) as r: -+ yield from r.data -+ -+ def convert_voice_json( -+ self, -+ *, -+ strip_headers: typing.Optional[bool] = OMIT, -+ audio: typing.Optional[core.File] = OMIT, -+ context: typing.Optional[PostedContext] = OMIT, -+ voice: typing.Optional[PostedUtteranceVoice] = OMIT, -+ format: typing.Optional[Format] = OMIT, -+ include_timestamp_types: typing.Optional[typing.List[TimestampType]] = OMIT, -+ request_options: typing.Optional[RequestOptions] = None, -+ ) -> typing.Iterator[TtsOutput]: -+ """ -+ Parameters -+ ---------- -+ strip_headers : typing.Optional[bool] -+ If enabled, the audio for all the chunks of a generation, once concatenated together, will constitute a single audio file. Otherwise, if disabled, each chunk's audio will be its own audio file, each with its own headers (if applicable). -+ -+ audio : typing.Optional[core.File] -+ See core.File for more documentation -+ -+ context : typing.Optional[PostedContext] -+ Utterances to use as context for generating consistent speech style and prosody across multiple requests. These will not be converted to speech output. -+ -+ voice : typing.Optional[PostedUtteranceVoice] -+ -+ format : typing.Optional[Format] -+ Specifies the output audio file format. -+ -+ include_timestamp_types : typing.Optional[typing.List[TimestampType]] -+ The set of timestamp types to include in the response. When used in multipart/form-data, specify each value using bracket notation: `include_timestamp_types[0]=word&include_timestamp_types[1]=phoneme`. Only supported for Octave 2 requests. -+ -+ request_options : typing.Optional[RequestOptions] -+ Request-specific configuration. -+ -+ Yields -+ ------ -+ typing.Iterator[TtsOutput] -+ Successful Response -+ -+ Examples -+ -------- -+ from hume import HumeClient -+ -+ client = HumeClient( -+ api_key="YOUR_API_KEY", -+ ) -+ response = client.tts.convert_voice_json() -+ for chunk in response: -+ yield chunk -+ """ -+ with self._raw_client.convert_voice_json( -+ strip_headers=strip_headers, -+ audio=audio, -+ context=context, -+ voice=voice, -+ format=format, -+ include_timestamp_types=include_timestamp_types, -+ request_options=request_options, -+ ) as r: -+ yield from r.data -+ - @property - def voices(self): - if self._voices is None: -@@ -479,21 +580,12 @@ class TtsClient: - self._voices = VoicesClient(client_wrapper=self._client_wrapper) - return self._voices - -- @property -- def stream_input(self): -- if self._stream_input is None: -- from .stream_input.client import StreamInputClient # noqa: E402 -- -- self._stream_input = StreamInputClient(client_wrapper=self._client_wrapper) -- return self._stream_input -- - - class AsyncTtsClient: - def __init__(self, *, client_wrapper: AsyncClientWrapper): - self._raw_client = AsyncRawTtsClient(client_wrapper=client_wrapper) - self._client_wrapper = client_wrapper - self._voices: typing.Optional[AsyncVoicesClient] = None -- self._stream_input: typing.Optional[AsyncStreamInputClient] = None - - @property - def with_raw_response(self) -> AsyncRawTtsClient: -@@ -539,10 +631,12 @@ class AsyncTtsClient: - Specifies the output audio file format. 
- - include_timestamp_types : typing.Optional[typing.Sequence[TimestampType]] -- The set of timestamp types to include in the response. -+ The set of timestamp types to include in the response. Only supported for Octave 2 requests. - - num_generations : typing.Optional[int] -- Number of generations of the audio to produce. -+ Number of audio generations to produce from the input utterances. -+ -+ Using `num_generations` enables faster processing than issuing multiple sequential requests. Additionally, specifying `num_generations` allows prosody continuation across all generations without repeating context, ensuring each generation sounds slightly different while maintaining contextual consistency. - - split_utterances : typing.Optional[bool] - Controls how audio output is segmented in the response. -@@ -564,10 +658,6 @@ class AsyncTtsClient: - For a comparison of Octave versions, see the [Octave versions](/docs/text-to-speech-tts/overview#octave-versions) section in the TTS overview. - - instant_mode : typing.Optional[bool] -- Enables ultra-low latency streaming, significantly reducing the time until the first audio chunk is received. Recommended for real-time applications requiring immediate audio playback. For further details, see our documentation on [instant mode](/docs/text-to-speech-tts/overview#ultra-low-latency-streaming-instant-mode). -- - A [voice](/reference/text-to-speech-tts/synthesize-json-streaming#request.body.utterances.voice) must be specified when instant mode is enabled. Dynamic voice generation is not supported with this mode. -- - Instant mode is only supported for streaming endpoints (e.g., [/v0/tts/stream/json](/reference/text-to-speech-tts/synthesize-json-streaming), [/v0/tts/stream/file](/reference/text-to-speech-tts/synthesize-file-streaming)). -- - Ensure only a single generation is requested ([num_generations](/reference/text-to-speech-tts/synthesize-json-streaming#request.body.num_generations) must be `1` or omitted). - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. -@@ -659,10 +749,12 @@ class AsyncTtsClient: - Specifies the output audio file format. - - include_timestamp_types : typing.Optional[typing.Sequence[TimestampType]] -- The set of timestamp types to include in the response. -+ The set of timestamp types to include in the response. Only supported for Octave 2 requests. - - num_generations : typing.Optional[int] -- Number of generations of the audio to produce. -+ Number of audio generations to produce from the input utterances. -+ -+ Using `num_generations` enables faster processing than issuing multiple sequential requests. Additionally, specifying `num_generations` allows prosody continuation across all generations without repeating context, ensuring each generation sounds slightly different while maintaining contextual consistency. - - split_utterances : typing.Optional[bool] - Controls how audio output is segmented in the response. -@@ -684,10 +776,6 @@ class AsyncTtsClient: - For a comparison of Octave versions, see the [Octave versions](/docs/text-to-speech-tts/overview#octave-versions) section in the TTS overview. - - instant_mode : typing.Optional[bool] -- Enables ultra-low latency streaming, significantly reducing the time until the first audio chunk is received. Recommended for real-time applications requiring immediate audio playback. For further details, see our documentation on [instant mode](/docs/text-to-speech-tts/overview#ultra-low-latency-streaming-instant-mode). 
--            - A [voice](/reference/text-to-speech-tts/synthesize-json-streaming#request.body.utterances.voice) must be specified when instant mode is enabled. Dynamic voice generation is not supported with this mode.
--            - Instant mode is only supported for streaming endpoints (e.g., [/v0/tts/stream/json](/reference/text-to-speech-tts/synthesize-json-streaming), [/v0/tts/stream/file](/reference/text-to-speech-tts/synthesize-file-streaming)).
--            - Ensure only a single generation is requested ([num_generations](/reference/text-to-speech-tts/synthesize-json-streaming#request.body.num_generations) must be `1` or omitted).
- 
-         request_options : typing.Optional[RequestOptions]
-             Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response.
-@@ -773,10 +861,12 @@ class AsyncTtsClient:
-             Specifies the output audio file format.
- 
-         include_timestamp_types : typing.Optional[typing.Sequence[TimestampType]]
--            The set of timestamp types to include in the response.
-+            The set of timestamp types to include in the response. Only supported for Octave 2 requests.
- 
-         num_generations : typing.Optional[int]
--            Number of generations of the audio to produce.
-+            Number of audio generations to produce from the input utterances.
-+
-+            Using `num_generations` enables faster processing than issuing multiple sequential requests. Additionally, specifying `num_generations` allows prosody continuation across all generations without repeating context, ensuring each generation sounds slightly different while maintaining contextual consistency.
- 
-         split_utterances : typing.Optional[bool]
-             Controls how audio output is segmented in the response.
-@@ -798,10 +888,6 @@
-             For a comparison of Octave versions, see the [Octave versions](/docs/text-to-speech-tts/overview#octave-versions) section in the TTS overview.
- 
-         instant_mode : typing.Optional[bool]
--            Enables ultra-low latency streaming, significantly reducing the time until the first audio chunk is received. Recommended for real-time applications requiring immediate audio playback. For further details, see our documentation on [instant mode](/docs/text-to-speech-tts/overview#ultra-low-latency-streaming-instant-mode).
--            - A [voice](/reference/text-to-speech-tts/synthesize-json-streaming#request.body.utterances.voice) must be specified when instant mode is enabled. Dynamic voice generation is not supported with this mode.
--            - Instant mode is only supported for streaming endpoints (e.g., [/v0/tts/stream/json](/reference/text-to-speech-tts/synthesize-json-streaming), [/v0/tts/stream/file](/reference/text-to-speech-tts/synthesize-file-streaming)).
--            - Ensure only a single generation is requested ([num_generations](/reference/text-to-speech-tts/synthesize-json-streaming#request.body.num_generations) must be `1` or omitted).
- 
-         request_options : typing.Optional[RequestOptions]
-             Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response.
-@@ -887,10 +973,12 @@ class AsyncTtsClient:
-             Specifies the output audio file format.
- 
-         include_timestamp_types : typing.Optional[typing.Sequence[TimestampType]]
--            The set of timestamp types to include in the response.
-+            The set of timestamp types to include in the response. Only supported for Octave 2 requests.
- 
-         num_generations : typing.Optional[int]
--            Number of generations of the audio to produce.
-+            Number of audio generations to produce from the input utterances.
-+
-+            Using `num_generations` enables faster processing than issuing multiple sequential requests. Additionally, specifying `num_generations` allows prosody continuation across all generations without repeating context, ensuring each generation sounds slightly different while maintaining contextual consistency.
- 
-         split_utterances : typing.Optional[bool]
-             Controls how audio output is segmented in the response.
-@@ -912,10 +1000,6 @@
-             For a comparison of Octave versions, see the [Octave versions](/docs/text-to-speech-tts/overview#octave-versions) section in the TTS overview.
- 
-         instant_mode : typing.Optional[bool]
--            Enables ultra-low latency streaming, significantly reducing the time until the first audio chunk is received. Recommended for real-time applications requiring immediate audio playback. For further details, see our documentation on [instant mode](/docs/text-to-speech-tts/overview#ultra-low-latency-streaming-instant-mode).
--            - A [voice](/reference/text-to-speech-tts/synthesize-json-streaming#request.body.utterances.voice) must be specified when instant mode is enabled. Dynamic voice generation is not supported with this mode.
--            - Instant mode is only supported for streaming endpoints (e.g., [/v0/tts/stream/json](/reference/text-to-speech-tts/synthesize-json-streaming), [/v0/tts/stream/file](/reference/text-to-speech-tts/synthesize-file-streaming)).
--            - Ensure only a single generation is requested ([num_generations](/reference/text-to-speech-tts/synthesize-json-streaming#request.body.num_generations) must be `1` or omitted).
- 
-         request_options : typing.Optional[RequestOptions]
-             Request-specific configuration.
-@@ -970,6 +1054,127 @@ class AsyncTtsClient:
-             async for _chunk in r.data:
-                 yield _chunk
- 
-+    async def convert_voice_file(
-+        self,
-+        *,
-+        audio: core.File,
-+        strip_headers: typing.Optional[bool] = OMIT,
-+        context: typing.Optional[PostedContext] = OMIT,
-+        voice: typing.Optional[PostedUtteranceVoice] = OMIT,
-+        format: typing.Optional[Format] = OMIT,
-+        include_timestamp_types: typing.Optional[typing.List[TimestampType]] = OMIT,
-+        request_options: typing.Optional[RequestOptions] = None,
-+    ) -> typing.AsyncIterator[bytes]:
-+        """
-+        Parameters
-+        ----------
-+        audio : core.File
-+            See core.File for more documentation
-+
-+        strip_headers : typing.Optional[bool]
-+            If enabled, the audio for all the chunks of a generation, once concatenated together, will constitute a single audio file. Otherwise, if disabled, each chunk's audio will be its own audio file, each with its own headers (if applicable).
-+
-+        context : typing.Optional[PostedContext]
-+            Utterances to use as context for generating consistent speech style and prosody across multiple requests. These will not be converted to speech output.
-+
-+        voice : typing.Optional[PostedUtteranceVoice]
-+
-+        format : typing.Optional[Format]
-+            Specifies the output audio file format.
-+
-+        include_timestamp_types : typing.Optional[typing.List[TimestampType]]
-+            The set of timestamp types to include in the response. When used in multipart/form-data, specify each value using bracket notation: `include_timestamp_types[0]=word&include_timestamp_types[1]=phoneme`. Only supported for Octave 2 requests.
-+
-+        request_options : typing.Optional[RequestOptions]
-+            Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response.
-+
-+        Returns
-+        -------
-+        typing.AsyncIterator[bytes]
-+            Successful Response
-+        """
-+        async with self._raw_client.convert_voice_file(
-+            audio=audio,
-+            strip_headers=strip_headers,
-+            context=context,
-+            voice=voice,
-+            format=format,
-+            include_timestamp_types=include_timestamp_types,
-+            request_options=request_options,
-+        ) as r:
-+            async for _chunk in r.data:
-+                yield _chunk
-+
-+    async def convert_voice_json(
-+        self,
-+        *,
-+        strip_headers: typing.Optional[bool] = OMIT,
-+        audio: typing.Optional[core.File] = OMIT,
-+        context: typing.Optional[PostedContext] = OMIT,
-+        voice: typing.Optional[PostedUtteranceVoice] = OMIT,
-+        format: typing.Optional[Format] = OMIT,
-+        include_timestamp_types: typing.Optional[typing.List[TimestampType]] = OMIT,
-+        request_options: typing.Optional[RequestOptions] = None,
-+    ) -> typing.AsyncIterator[TtsOutput]:
-+        """
-+        Parameters
-+        ----------
-+        strip_headers : typing.Optional[bool]
-+            If enabled, the audio for all the chunks of a generation, once concatenated together, will constitute a single audio file. Otherwise, if disabled, each chunk's audio will be its own audio file, each with its own headers (if applicable).
-+
-+        audio : typing.Optional[core.File]
-+            See core.File for more documentation
-+
-+        context : typing.Optional[PostedContext]
-+            Utterances to use as context for generating consistent speech style and prosody across multiple requests. These will not be converted to speech output.
-+
-+        voice : typing.Optional[PostedUtteranceVoice]
-+
-+        format : typing.Optional[Format]
-+            Specifies the output audio file format.
-+
-+        include_timestamp_types : typing.Optional[typing.List[TimestampType]]
-+            The set of timestamp types to include in the response. When used in multipart/form-data, specify each value using bracket notation: `include_timestamp_types[0]=word&include_timestamp_types[1]=phoneme`. Only supported for Octave 2 requests.
-+
-+        request_options : typing.Optional[RequestOptions]
-+            Request-specific configuration.
-+
-+        Yields
-+        ------
-+        typing.AsyncIterator[TtsOutput]
-+            Successful Response
-+
-+        Examples
-+        --------
-+        import asyncio
-+
-+        from hume import AsyncHumeClient
-+
-+        client = AsyncHumeClient(
-+            api_key="YOUR_API_KEY",
-+        )
-+
-+
-+        async def main() -> None:
-+            response = await client.tts.convert_voice_json()
-+            async for chunk in response:
-+                yield chunk
-+
-+
-+        asyncio.run(main())
-+        """
-+        async with self._raw_client.convert_voice_json(
-+            strip_headers=strip_headers,
-+            audio=audio,
-+            context=context,
-+            voice=voice,
-+            format=format,
-+            include_timestamp_types=include_timestamp_types,
-+            request_options=request_options,
-+        ) as r:
-+            async for _chunk in r.data:
-+                yield _chunk
-+
-     @property
-     def voices(self):
-         if self._voices is None:
-@@ -977,13 +1182,3 @@ class AsyncTtsClient:
- 
-         self._voices = AsyncVoicesClient(client_wrapper=self._client_wrapper)
-         return self._voices
--
--    @property
--    def stream_input(self):
--        if self._stream_input is None:
--            from .stream_input.client import AsyncStreamInputClient
--
--            self._stream_input = AsyncStreamInputClient(
--                client_wrapper=self._client_wrapper,
--            )
--        return self._stream_input
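A usage note on the voice-conversion surface shown above: because these methods are async generators, the request is only issued once the iterator is consumed, and the generated docstring example would need something like print(chunk) in place of yield chunk to actually run under asyncio.run. The sketch below shows one way to call convert_voice_file end to end; the file names and the voice payload are illustrative assumptions, not values from this diff.

import asyncio

from hume import AsyncHumeClient


async def main() -> None:
    client = AsyncHumeClient(api_key="YOUR_API_KEY")
    # "input.wav" is a placeholder; any binary file handle accepted as core.File works.
    with open("input.wav", "rb") as f:
        chunks = []
        async for chunk in client.tts.convert_voice_file(
            audio=f,
            voice={"provider": "HUME_AI"},  # illustrative; supply a real voice per the API reference
            format={"type": "mp3"},
        ):
            chunks.append(chunk)
    # With strip_headers enabled, the concatenated chunks form a single audio file.
    with open("converted.mp3", "wb") as out:
        out.write(b"".join(chunks))


asyncio.run(main())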
diff --git a/tests/conftest.py b/tests/conftest.py
new file mode 100644
index 00000000..2103b1be
--- /dev/null
+++ b/tests/conftest.py
@@ -0,0 +1,99 @@
+"""
+Pytest plugin that manages the WireMock container lifecycle for wire tests.
+
+This plugin is loaded globally for the test suite and is responsible for
+starting and stopping the WireMock container exactly once per test run,
+including when the whole suite runs in parallel under pytest-xdist.
+
+It lives under tests/ (as tests/conftest.py) and is discovered automatically
+by pytest's normal test collection rules.
+"""
+
+import os
+import subprocess
+
+import pytest
+
+_STARTED: bool = False
+
+
+def _compose_file() -> str:
+    """Returns the path to the docker-compose file for WireMock."""
+    # This file lives in tests/conftest.py, so the project root is the parent of tests.
+    tests_dir = os.path.dirname(__file__)
+    project_root = os.path.abspath(os.path.join(tests_dir, ".."))
+    wiremock_dir = os.path.join(project_root, "wiremock")
+    return os.path.join(wiremock_dir, "docker-compose.test.yml")
+
+
+def _start_wiremock() -> None:
+    """Starts the WireMock container using docker-compose."""
+    global _STARTED
+    if _STARTED:
+        return
+
+    compose_file = _compose_file()
+    print("\nStarting WireMock container...")
+    try:
+        subprocess.run(
+            ["docker", "compose", "-f", compose_file, "up", "-d", "--wait"],
+            check=True,
+            capture_output=True,
+            text=True,
+        )
+        print("WireMock container is ready")
+        _STARTED = True
+    except subprocess.CalledProcessError as e:
+        print(f"Failed to start WireMock: {e.stderr}")
+        raise
+
+
+def _stop_wiremock() -> None:
+    """Stops and removes the WireMock container."""
+    compose_file = _compose_file()
+    print("\nStopping WireMock container...")
+    subprocess.run(
+        ["docker", "compose", "-f", compose_file, "down", "-v"],
+        check=False,
+        capture_output=True,
+    )
+
+
+def _is_xdist_worker(config: pytest.Config) -> bool:
+    """
+    Determines if the current process is an xdist worker.
+
+    In pytest-xdist, worker processes have a 'workerinput' attribute
+    on the config object, while the controller process does not.
+    """
+    return hasattr(config, "workerinput")
+
+
+def pytest_configure(config: pytest.Config) -> None:
+    """
+    Pytest hook that runs during test session setup.
+
+    Starts the WireMock container only from the controller process (xdist)
+    or the single process (non-xdist). This ensures only one container
+    is started regardless of the number of worker processes.
+    """
+    if _is_xdist_worker(config):
+        # Workers never manage the container lifecycle.
+        return
+
+    _start_wiremock()
+
+
+def pytest_unconfigure(config: pytest.Config) -> None:
+    """
+    Pytest hook that runs during test session teardown.
+
+    Stops the WireMock container only from the controller process (xdist)
+    or the single process (non-xdist). This ensures the container is
+    cleaned up after all workers have finished.
+    """
+    if _is_xdist_worker(config):
+        # Workers never manage the container lifecycle.
+        return
+
+    _stop_wiremock()
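The plugin above relies on docker compose up --wait, which needs a reasonably recent Docker. Where that flag is unavailable, a small readiness poll against WireMock's admin health endpoint (the same endpoint the compose healthcheck later in this diff curls) is a workable fallback. This helper is a sketch, not part of the generated plugin:

import time

import requests


def wait_for_wiremock(url: str = "http://localhost:8080/__admin/health", timeout: float = 30.0) -> None:
    """Polls the WireMock admin health endpoint until it responds or the timeout expires."""
    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        try:
            if requests.get(url, timeout=2).status_code == 200:
                return
        except requests.ConnectionError:
            # Container not accepting connections yet; keep polling.
            pass
        time.sleep(0.5)
    raise RuntimeError(f"WireMock did not become healthy within {timeout}s")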
diff --git a/tests/utils/test_http_client.py b/tests/utils/test_http_client.py
index b0866ba8..305bc669 100644
--- a/tests/utils/test_http_client.py
+++ b/tests/utils/test_http_client.py
@@ -1,13 +1,57 @@
 # This file was auto-generated by Fern from our API Definition.
 
-from hume.core.http_client import get_request_body
+from typing import Any, Dict
+
+import pytest
+
+from hume.core.http_client import (
+    AsyncHttpClient,
+    HttpClient,
+    _build_url,
+    get_request_body,
+    remove_none_from_dict,
+)
 from hume.core.request_options import RequestOptions
 
 
+# Stub clients for testing HttpClient and AsyncHttpClient
+class _DummySyncClient:
+    """A minimal stub for httpx.Client that records request arguments."""
+
+    def __init__(self) -> None:
+        self.last_request_kwargs: Dict[str, Any] = {}
+
+    def request(self, **kwargs: Any) -> "_DummyResponse":
+        self.last_request_kwargs = kwargs
+        return _DummyResponse()
+
+
+class _DummyAsyncClient:
+    """A minimal stub for httpx.AsyncClient that records request arguments."""
+
+    def __init__(self) -> None:
+        self.last_request_kwargs: Dict[str, Any] = {}
+
+    async def request(self, **kwargs: Any) -> "_DummyResponse":
+        self.last_request_kwargs = kwargs
+        return _DummyResponse()
+
+
+class _DummyResponse:
+    """A minimal stub for httpx.Response."""
+
+    status_code = 200
+    headers: Dict[str, str] = {}
+
+
 def get_request_options() -> RequestOptions:
     return {"additional_body_parameters": {"see you": "later"}}
 
 
+def get_request_options_with_none() -> RequestOptions:
+    return {"additional_body_parameters": {"see you": "later", "optional": None}}
+
+
 def test_get_json_request_body() -> None:
     json_body, data_body = get_request_body(json={"hello": "world"}, data=None, request_options=None, omit=None)
     assert json_body == {"hello": "world"}
@@ -48,14 +92,209 @@ def test_get_none_request_body() -> None:
 
 
 def test_get_empty_json_request_body() -> None:
+    """Test that implicit empty bodies (json=None) are collapsed to None."""
     unrelated_request_options: RequestOptions = {"max_retries": 3}
     json_body, data_body = get_request_body(json=None, data=None, request_options=unrelated_request_options, omit=None)
     assert json_body is None
     assert data_body is None
 
-    json_body_extras, data_body_extras = get_request_body(
-        json={}, data=None, request_options=unrelated_request_options, omit=None
+
+def test_explicit_empty_json_body_is_preserved() -> None:
+    """Test that explicit empty bodies (json={}) are preserved and sent as {}.
+
+    This is important for endpoints where the request body is required but all
+    fields are optional. The server expects valid JSON ({}) not an empty body.
+    """
+    unrelated_request_options: RequestOptions = {"max_retries": 3}
+
+    # Explicit json={} should be preserved
+    json_body, data_body = get_request_body(json={}, data=None, request_options=unrelated_request_options, omit=None)
+    assert json_body == {}
+    assert data_body is None
+
+    # Explicit data={} should also be preserved
+    json_body2, data_body2 = get_request_body(json=None, data={}, request_options=unrelated_request_options, omit=None)
+    assert json_body2 is None
+    assert data_body2 == {}
+
+
+def test_json_body_preserves_none_values() -> None:
+    """Test that JSON bodies preserve None values (they become JSON null)."""
+    json_body, data_body = get_request_body(
+        json={"hello": "world", "optional": None}, data=None, request_options=None, omit=None
     )
+    # JSON bodies should preserve None values
+    assert json_body == {"hello": "world", "optional": None}
+    assert data_body is None
 
-    assert json_body_extras is None
-    assert data_body_extras is None
+
+def test_data_body_preserves_none_values_without_multipart() -> None:
+    """Test that data bodies preserve None values when not using multipart.
+
+    The filtering of None values happens in HttpClient.request/stream methods,
+    not in get_request_body. This test verifies get_request_body doesn't filter None.
+    """
+    json_body, data_body = get_request_body(
+        json=None, data={"hello": "world", "optional": None}, request_options=None, omit=None
+    )
+    # get_request_body should preserve None values in data body
+    # The filtering happens later in HttpClient.request when multipart is detected
+    assert data_body == {"hello": "world", "optional": None}
+    assert json_body is None
+
+
+def test_remove_none_from_dict_filters_none_values() -> None:
+    """Test that remove_none_from_dict correctly filters out None values."""
+    original = {"hello": "world", "optional": None, "another": "value", "also_none": None}
+    filtered = remove_none_from_dict(original)
+    assert filtered == {"hello": "world", "another": "value"}
+    # Original should not be modified
+    assert original == {"hello": "world", "optional": None, "another": "value", "also_none": None}
+
+
+def test_remove_none_from_dict_empty_dict() -> None:
+    """Test that remove_none_from_dict handles an empty dict."""
+    assert remove_none_from_dict({}) == {}
+
+
+def test_remove_none_from_dict_all_none() -> None:
+    """Test that remove_none_from_dict handles a dict with all None values."""
+    assert remove_none_from_dict({"a": None, "b": None}) == {}
+
+
+def test_http_client_does_not_pass_empty_params_list() -> None:
+    """Test that HttpClient passes params=None when params are empty.
+
+    This prevents httpx from stripping existing query parameters from the URL,
+    which happens when params=[] or params={} is passed.
+    """
+    dummy_client = _DummySyncClient()
+    http_client = HttpClient(
+        httpx_client=dummy_client,  # type: ignore[arg-type]
+        base_timeout=lambda: None,
+        base_headers=lambda: {},
+        base_url=lambda: "https://example.com",
+    )
+
+    # Use a path with query params (e.g., pagination cursor URL)
+    http_client.request(
+        path="resource?after=123",
+        method="GET",
+        params=None,
+        request_options=None,
+    )
+
+    # We care that httpx receives params=None, not [] or {}
+    assert "params" in dummy_client.last_request_kwargs
+    assert dummy_client.last_request_kwargs["params"] is None
+
+    # Verify the query string in the URL is preserved
+    url = str(dummy_client.last_request_kwargs["url"])
+    assert "after=123" in url, f"Expected query param 'after=123' in URL, got: {url}"
+
+
+def test_http_client_passes_encoded_params_when_present() -> None:
+    """Test that HttpClient passes encoded params when params are provided."""
+    dummy_client = _DummySyncClient()
+    http_client = HttpClient(
+        httpx_client=dummy_client,  # type: ignore[arg-type]
+        base_timeout=lambda: None,
+        base_headers=lambda: {},
+        base_url=lambda: "https://example.com/resource",
+    )
+
+    http_client.request(
+        path="",
+        method="GET",
+        params={"after": "456"},
+        request_options=None,
+    )
+
+    params = dummy_client.last_request_kwargs["params"]
+    # For a simple dict, encode_query should give a single (key, value) tuple
+    assert params == [("after", "456")]
+
+
+@pytest.mark.asyncio
+async def test_async_http_client_does_not_pass_empty_params_list() -> None:
+    """Test that AsyncHttpClient passes params=None when params are empty.
+
+    This prevents httpx from stripping existing query parameters from the URL,
+    which happens when params=[] or params={} is passed.
+    """
+    dummy_client = _DummyAsyncClient()
+    http_client = AsyncHttpClient(
+        httpx_client=dummy_client,  # type: ignore[arg-type]
+        base_timeout=lambda: None,
+        base_headers=lambda: {},
+        base_url=lambda: "https://example.com",
+        async_base_headers=None,
+    )
+
+    # Use a path with query params (e.g., pagination cursor URL)
+    await http_client.request(
+        path="resource?after=123",
+        method="GET",
+        params=None,
+        request_options=None,
+    )
+
+    # We care that httpx receives params=None, not [] or {}
+    assert "params" in dummy_client.last_request_kwargs
+    assert dummy_client.last_request_kwargs["params"] is None
+
+    # Verify the query string in the URL is preserved
+    url = str(dummy_client.last_request_kwargs["url"])
+    assert "after=123" in url, f"Expected query param 'after=123' in URL, got: {url}"
+
+
+@pytest.mark.asyncio
+async def test_async_http_client_passes_encoded_params_when_present() -> None:
+    """Test that AsyncHttpClient passes encoded params when params are provided."""
+    dummy_client = _DummyAsyncClient()
+    http_client = AsyncHttpClient(
+        httpx_client=dummy_client,  # type: ignore[arg-type]
+        base_timeout=lambda: None,
+        base_headers=lambda: {},
+        base_url=lambda: "https://example.com/resource",
+        async_base_headers=None,
+    )
+
+    await http_client.request(
+        path="",
+        method="GET",
+        params={"after": "456"},
+        request_options=None,
+    )
+
+    params = dummy_client.last_request_kwargs["params"]
+    # For a simple dict, encode_query should give a single (key, value) tuple
+    assert params == [("after", "456")]
+
+
+def test_basic_url_joining() -> None:
+    """Test basic URL joining with a simple base URL and path."""
+    result = _build_url("https://api.example.com", "/users")
+    assert result == "https://api.example.com/users"
+
+
+def test_basic_url_joining_trailing_slash() -> None:
+    """Test basic URL joining with a base URL that has a trailing slash."""
+    result = _build_url("https://api.example.com/", "/users")
+    assert result == "https://api.example.com/users"
+
+
+def test_preserves_base_url_path_prefix() -> None:
+    """Test that path prefixes in the base URL are preserved.
+
+    This is the critical bug fix - urllib.parse.urljoin() would strip
+    the path prefix when the path starts with '/'.
+    """
+    result = _build_url("https://cloud.example.com/org/tenant/api", "/users")
+    assert result == "https://cloud.example.com/org/tenant/api/users"
+
+
+def test_preserves_base_url_path_prefix_trailing_slash() -> None:
+    """Test that path prefixes in a trailing-slash base URL are preserved."""
+    result = _build_url("https://cloud.example.com/org/tenant/api/", "/users")
+    assert result == "https://cloud.example.com/org/tenant/api/users"
diff --git a/tests/wire/__init__.py b/tests/wire/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/wire/conftest.py b/tests/wire/conftest.py
new file mode 100644
index 00000000..23097596
--- /dev/null
+++ b/tests/wire/conftest.py
@@ -0,0 +1,57 @@
+"""
+Pytest configuration for wire tests.
+
+This module provides helpers for creating a configured client that talks to
+WireMock and for verifying requests in WireMock.
+
+The WireMock container lifecycle itself is managed by a top-level pytest
+plugin (tests/conftest.py) so that the container is started exactly
+once per test run, even when using pytest-xdist.
+"""
+
+from typing import Any, Dict, Optional
+
+import requests
+
+from hume.client import HumeClient
+
+
+def get_client(test_id: str) -> HumeClient:
+    """
+    Creates a configured client instance for wire tests.
+
+    Args:
+        test_id: Unique identifier for the test, used for request tracking.
+
+    Returns:
+        A configured client instance with all required auth parameters.
+    """
+    return HumeClient(
+        base_url="http://localhost:8080",
+        headers={"X-Test-Id": test_id},
+        api_key="test_api_key",
+    )
+
+
+def verify_request_count(
+    test_id: str,
+    method: str,
+    url_path: str,
+    query_params: Optional[Dict[str, str]],
+    expected: int,
+) -> None:
+    """Verifies the number of requests made to WireMock, filtered by test ID so concurrent tests stay isolated."""
+    wiremock_admin_url = "http://localhost:8080/__admin"
+    request_body: Dict[str, Any] = {
+        "method": method,
+        "urlPath": url_path,
+        "headers": {"X-Test-Id": {"equalTo": test_id}},
+    }
+    if query_params:
+        query_parameters = {k: {"equalTo": v} for k, v in query_params.items()}
+        request_body["queryParameters"] = query_parameters
+    response = requests.post(f"{wiremock_admin_url}/requests/find", json=request_body)
+    assert response.status_code == 200, "Failed to query WireMock requests"
+    result = response.json()
+    requests_found = len(result.get("requests", []))
+    assert requests_found == expected, f"Expected {expected} requests, found {requests_found}"
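New endpoints can also be stubbed at runtime through WireMock's standard admin API instead of editing the static wiremock-mappings.json that ships later in this diff. The helper below is a sketch that mirrors the mapping shape used in that file; the function name and arguments are not part of the generated suite:

import requests


def register_stub(method: str, url_path: str, status: int, body: str) -> None:
    """Registers a one-off WireMock stub via POST /__admin/mappings (sketch)."""
    mapping = {
        "request": {"urlPathTemplate": url_path, "method": method},
        "response": {
            "status": status,
            "body": body,
            "headers": {"Content-Type": "application/json"},
        },
    }
    response = requests.post("http://localhost:8080/__admin/mappings", json=mapping)
    # WireMock answers 201 Created for a newly registered mapping.
    assert response.status_code == 201, "Failed to register WireMock stub"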
diff --git a/tests/wire/test_empathicVoice_chatGroups.py b/tests/wire/test_empathicVoice_chatGroups.py
new file mode 100644
index 00000000..74162113
--- /dev/null
+++ b/tests/wire/test_empathicVoice_chatGroups.py
@@ -0,0 +1,70 @@
+from .conftest import get_client, verify_request_count
+
+
+def test_empathicVoice_chatGroups_list_chat_groups() -> None:
+    """Test list-chat-groups endpoint with WireMock"""
+    test_id = "empathic_voice.chat_groups.list_chat_groups.0"
+    client = get_client(test_id)
+    client.empathic_voice.chat_groups.list_chat_groups(
+        page_number=0, page_size=1, ascending_order=True, config_id="1b60e1a0-cc59-424a-8d2c-189d354db3f3"
+    )
+    verify_request_count(
+        test_id,
+        "GET",
+        "/v0/evi/chat_groups",
+        {
+            "page_number": "0",
+            "page_size": "1",
+            "ascending_order": "true",
+            "config_id": "1b60e1a0-cc59-424a-8d2c-189d354db3f3",
+        },
+        1,
+    )
+
+
+def test_empathicVoice_chatGroups_get_chat_group() -> None:
+    """Test get-chat-group endpoint with WireMock"""
+    test_id = "empathic_voice.chat_groups.get_chat_group.0"
+    client = get_client(test_id)
+    client.empathic_voice.chat_groups.get_chat_group(
+        id="697056f0-6c7e-487d-9bd8-9c19df79f05f", page_number=0, page_size=1, ascending_order=True
+    )
+    verify_request_count(
+        test_id,
+        "GET",
+        "/v0/evi/chat_groups/697056f0-6c7e-487d-9bd8-9c19df79f05f",
+        {"page_number": "0", "page_size": "1", "ascending_order": "true"},
+        1,
+    )
+
+
+def test_empathicVoice_chatGroups_get_audio() -> None:
+    """Test get-audio endpoint with WireMock"""
+    test_id = "empathic_voice.chat_groups.get_audio.0"
+    client = get_client(test_id)
+    client.empathic_voice.chat_groups.get_audio(
+        id="369846cf-6ad5-404d-905e-a8acb5cdfc78", page_number=0, page_size=10, ascending_order=True
+    )
+    verify_request_count(
+        test_id,
+        "GET",
+        "/v0/evi/chat_groups/369846cf-6ad5-404d-905e-a8acb5cdfc78/audio",
+        {"page_number": "0", "page_size": "10", "ascending_order": "true"},
+        1,
+    )
+
+
+def test_empathicVoice_chatGroups_list_chat_group_events() -> None:
+    """Test list-chat-group-events endpoint with WireMock"""
+    test_id = "empathic_voice.chat_groups.list_chat_group_events.0"
+    client = get_client(test_id)
+    client.empathic_voice.chat_groups.list_chat_group_events(
+        id="697056f0-6c7e-487d-9bd8-9c19df79f05f", page_number=0, page_size=3, ascending_order=True
+    )
+    verify_request_count(
+        test_id,
+        "GET",
+        "/v0/evi/chat_groups/697056f0-6c7e-487d-9bd8-9c19df79f05f/events",
+        {"page_number": "0", "page_size": "3", "ascending_order": "true"},
+        1,
+    )
diff --git a/tests/wire/test_empathicVoice_chats.py b/tests/wire/test_empathicVoice_chats.py
new file mode 100644
index 00000000..47e57ec4
--- /dev/null
+++ b/tests/wire/test_empathicVoice_chats.py
@@ -0,0 +1,35 @@
+from .conftest import get_client, verify_request_count
+
+
+def test_empathicVoice_chats_list_chats() -> None:
+    """Test list-chats endpoint with WireMock"""
+    test_id = "empathic_voice.chats.list_chats.0"
+    client = get_client(test_id)
+    client.empathic_voice.chats.list_chats(page_number=0, page_size=1, ascending_order=True)
+    verify_request_count(
+        test_id, "GET", "/v0/evi/chats", {"page_number": "0", "page_size": "1", "ascending_order": "true"}, 1
+    )
+
+
+def test_empathicVoice_chats_list_chat_events() -> None:
+    """Test list-chat-events endpoint with WireMock"""
+    test_id = "empathic_voice.chats.list_chat_events.0"
+    client = get_client(test_id)
+    client.empathic_voice.chats.list_chat_events(
+        id="470a49f6-1dec-4afe-8b61-035d3b2d63b0", page_number=0, page_size=3, ascending_order=True
+    )
+    verify_request_count(
+        test_id,
+        "GET",
+        "/v0/evi/chats/470a49f6-1dec-4afe-8b61-035d3b2d63b0",
+        {"page_number": "0", "page_size": "3", "ascending_order": "true"},
+        1,
+    )
+
+
+def test_empathicVoice_chats_get_audio() -> None:
+    """Test get-audio endpoint with WireMock"""
+    test_id = "empathic_voice.chats.get_audio.0"
+    client = get_client(test_id)
+    client.empathic_voice.chats.get_audio(id="470a49f6-1dec-4afe-8b61-035d3b2d63b0")
+    verify_request_count(test_id, "GET", "/v0/evi/chats/470a49f6-1dec-4afe-8b61-035d3b2d63b0/audio", None, 1)
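The per-test X-Test-Id header is what keeps these counts stable when the suite runs in parallel: every client tags its own traffic, and verify_request_count only matches requests carrying that tag. The sketch below (hypothetical test ids, not part of the generated files) shows two calls hitting the same stub that are still counted independently:

from .conftest import get_client, verify_request_count


def test_request_isolation_sketch() -> None:
    # Hypothetical ids; each client tags its traffic so find-requests can tell them apart.
    first = get_client("sketch.isolation.a")
    second = get_client("sketch.isolation.b")
    first.empathic_voice.chats.list_chats(page_number=0, page_size=1, ascending_order=True)
    second.empathic_voice.chats.list_chats(page_number=0, page_size=1, ascending_order=True)
    # Same stub, same URL, but each test id sees exactly one request.
    params = {"page_number": "0", "page_size": "1", "ascending_order": "true"}
    verify_request_count("sketch.isolation.a", "GET", "/v0/evi/chats", params, 1)
    verify_request_count("sketch.isolation.b", "GET", "/v0/evi/chats", params, 1)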
"/v0/evi/configs/1b60e1a0-cc59-424a-8d2c-189d354db3f3", None, 1) + + +def test_empathicVoice_configs_create_config_version() -> None: + """Test create-config-version endpoint with WireMock""" + test_id = "empathic_voice.configs.create_config_version.0" + client = get_client(test_id) + client.empathic_voice.configs.create_config_version( + id="1b60e1a0-cc59-424a-8d2c-189d354db3f3", + version_description="This is an updated version of the Weather Assistant Config.", + evi_version="3", + prompt={"id": "af699d45-2985-42cc-91b9-af9e5da3bac5", "version": 0}, + voice={"provider": "HUME_AI"}, + language_model={"model_provider": "ANTHROPIC", "model_resource": "claude-3-7-sonnet-latest", "temperature": 1}, + ellm_model={"allow_short_responses": True}, + event_messages={ + "on_new_chat": {"enabled": False, "text": ""}, + "on_inactivity_timeout": {"enabled": False, "text": ""}, + "on_max_duration_timeout": {"enabled": False, "text": ""}, + }, + ) + verify_request_count(test_id, "POST", "/v0/evi/configs/1b60e1a0-cc59-424a-8d2c-189d354db3f3", None, 1) + + +def test_empathicVoice_configs_delete_config() -> None: + """Test delete-config endpoint with WireMock""" + test_id = "empathic_voice.configs.delete_config.0" + client = get_client(test_id) + client.empathic_voice.configs.delete_config(id="1b60e1a0-cc59-424a-8d2c-189d354db3f3") + verify_request_count(test_id, "DELETE", "/v0/evi/configs/1b60e1a0-cc59-424a-8d2c-189d354db3f3", None, 1) + + +def test_empathicVoice_configs_update_config_name() -> None: + """Test update-config-name endpoint with WireMock""" + test_id = "empathic_voice.configs.update_config_name.0" + client = get_client(test_id) + client.empathic_voice.configs.update_config_name( + id="1b60e1a0-cc59-424a-8d2c-189d354db3f3", name="Updated Weather Assistant Config Name" + ) + verify_request_count(test_id, "PATCH", "/v0/evi/configs/1b60e1a0-cc59-424a-8d2c-189d354db3f3", None, 1) + + +def test_empathicVoice_configs_get_config_version() -> None: + """Test get-config-version endpoint with WireMock""" + test_id = "empathic_voice.configs.get_config_version.0" + client = get_client(test_id) + client.empathic_voice.configs.get_config_version(id="1b60e1a0-cc59-424a-8d2c-189d354db3f3", version=1) + verify_request_count(test_id, "GET", "/v0/evi/configs/1b60e1a0-cc59-424a-8d2c-189d354db3f3/version/1", None, 1) + + +def test_empathicVoice_configs_delete_config_version() -> None: + """Test delete-config-version endpoint with WireMock""" + test_id = "empathic_voice.configs.delete_config_version.0" + client = get_client(test_id) + client.empathic_voice.configs.delete_config_version(id="1b60e1a0-cc59-424a-8d2c-189d354db3f3", version=1) + verify_request_count(test_id, "DELETE", "/v0/evi/configs/1b60e1a0-cc59-424a-8d2c-189d354db3f3/version/1", None, 1) + + +def test_empathicVoice_configs_update_config_description() -> None: + """Test update-config-description endpoint with WireMock""" + test_id = "empathic_voice.configs.update_config_description.0" + client = get_client(test_id) + client.empathic_voice.configs.update_config_description( + id="1b60e1a0-cc59-424a-8d2c-189d354db3f3", + version=1, + version_description="This is an updated version_description.", + ) + verify_request_count(test_id, "PATCH", "/v0/evi/configs/1b60e1a0-cc59-424a-8d2c-189d354db3f3/version/1", None, 1) diff --git a/tests/wire/test_empathicVoice_controlPlane.py b/tests/wire/test_empathicVoice_controlPlane.py new file mode 100644 index 00000000..2963f38f --- /dev/null +++ b/tests/wire/test_empathicVoice_controlPlane.py @@ -0,0 +1,9 @@ +from 
diff --git a/tests/wire/test_empathicVoice_controlPlane.py b/tests/wire/test_empathicVoice_controlPlane.py
new file mode 100644
index 00000000..2963f38f
--- /dev/null
+++ b/tests/wire/test_empathicVoice_controlPlane.py
@@ -0,0 +1,9 @@
+from .conftest import get_client, verify_request_count
+
+
+def test_empathicVoice_controlPlane_send() -> None:
+    """Test send endpoint with WireMock"""
+    test_id = "empathic_voice.control_plane.send.0"
+    client = get_client(test_id)
+    client.empathic_voice.control_plane.send(chat_id="chat_id", request={"type": "session_settings"})
+    verify_request_count(test_id, "POST", "/v0/evi/chat/chat_id/send", None, 1)
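The control-plane send above posts a bare {"type": "session_settings"} payload; in practice, additional session settings ride along in the same dict. Any field beyond "type" in this sketch is an assumption to be checked against the EVI session-settings documentation:

from .conftest import get_client

client = get_client("sketch.control_plane.send")
# "system_prompt" is an assumed session-settings field; verify against the EVI docs before relying on it.
client.empathic_voice.control_plane.send(
    chat_id="chat_id",
    request={"type": "session_settings", "system_prompt": "Keep answers brief."},
)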
diff --git a/tests/wire/test_empathicVoice_prompts.py b/tests/wire/test_empathicVoice_prompts.py
new file mode 100644
index 00000000..3dc25e7f
--- /dev/null
+++ b/tests/wire/test_empathicVoice_prompts.py
@@ -0,0 +1,86 @@
+from .conftest import get_client, verify_request_count
+
+
+def test_empathicVoice_prompts_list_prompts() -> None:
+    """Test list-prompts endpoint with WireMock"""
+    test_id = "empathic_voice.prompts.list_prompts.0"
+    client = get_client(test_id)
+    client.empathic_voice.prompts.list_prompts(page_number=0, page_size=2)
+    verify_request_count(test_id, "GET", "/v0/evi/prompts", {"page_number": "0", "page_size": "2"}, 1)
+
+
+def test_empathicVoice_prompts_create_prompt() -> None:
+    """Test create-prompt endpoint with WireMock"""
+    test_id = "empathic_voice.prompts.create_prompt.0"
+    client = get_client(test_id)
+    client.empathic_voice.prompts.create_prompt(
+        name="Weather Assistant Prompt",
+        text="You are an AI weather assistant providing users with accurate and up-to-date weather information. Respond to user queries concisely and clearly. Use simple language and avoid technical jargon. Provide temperature, precipitation, wind conditions, and any weather alerts. Include helpful tips if severe weather is expected.",
+    )
+    verify_request_count(test_id, "POST", "/v0/evi/prompts", None, 1)
+
+
+def test_empathicVoice_prompts_list_prompt_versions() -> None:
+    """Test list-prompt-versions endpoint with WireMock"""
+    test_id = "empathic_voice.prompts.list_prompt_versions.0"
+    client = get_client(test_id)
+    client.empathic_voice.prompts.list_prompt_versions(id="af699d45-2985-42cc-91b9-af9e5da3bac5")
+    verify_request_count(test_id, "GET", "/v0/evi/prompts/af699d45-2985-42cc-91b9-af9e5da3bac5", None, 1)
+
+
+def test_empathicVoice_prompts_create_prompt_version() -> None:
+    """Test create-prompt-version endpoint with WireMock"""
+    test_id = "empathic_voice.prompts.create_prompt_version.0"
+    client = get_client(test_id)
+    client.empathic_voice.prompts.create_prompt_version(
+        id="af699d45-2985-42cc-91b9-af9e5da3bac5",
+        text="You are an updated version of an AI weather assistant providing users with accurate and up-to-date weather information. Respond to user queries concisely and clearly. Use simple language and avoid technical jargon. Provide temperature, precipitation, wind conditions, and any weather alerts. Include helpful tips if severe weather is expected.",
+        version_description="This is an updated version of the Weather Assistant Prompt.",
+    )
+    verify_request_count(test_id, "POST", "/v0/evi/prompts/af699d45-2985-42cc-91b9-af9e5da3bac5", None, 1)
+
+
+def test_empathicVoice_prompts_delete_prompt() -> None:
+    """Test delete-prompt endpoint with WireMock"""
+    test_id = "empathic_voice.prompts.delete_prompt.0"
+    client = get_client(test_id)
+    client.empathic_voice.prompts.delete_prompt(id="af699d45-2985-42cc-91b9-af9e5da3bac5")
+    verify_request_count(test_id, "DELETE", "/v0/evi/prompts/af699d45-2985-42cc-91b9-af9e5da3bac5", None, 1)
+
+
+def test_empathicVoice_prompts_update_prompt_name() -> None:
+    """Test update-prompt-name endpoint with WireMock"""
+    test_id = "empathic_voice.prompts.update_prompt_name.0"
+    client = get_client(test_id)
+    client.empathic_voice.prompts.update_prompt_name(
+        id="af699d45-2985-42cc-91b9-af9e5da3bac5", name="Updated Weather Assistant Prompt Name"
+    )
+    verify_request_count(test_id, "PATCH", "/v0/evi/prompts/af699d45-2985-42cc-91b9-af9e5da3bac5", None, 1)
+
+
+def test_empathicVoice_prompts_get_prompt_version() -> None:
+    """Test get-prompt-version endpoint with WireMock"""
+    test_id = "empathic_voice.prompts.get_prompt_version.0"
+    client = get_client(test_id)
+    client.empathic_voice.prompts.get_prompt_version(id="af699d45-2985-42cc-91b9-af9e5da3bac5", version=0)
+    verify_request_count(test_id, "GET", "/v0/evi/prompts/af699d45-2985-42cc-91b9-af9e5da3bac5/version/0", None, 1)
+
+
+def test_empathicVoice_prompts_delete_prompt_version() -> None:
+    """Test delete-prompt-version endpoint with WireMock"""
+    test_id = "empathic_voice.prompts.delete_prompt_version.0"
+    client = get_client(test_id)
+    client.empathic_voice.prompts.delete_prompt_version(id="af699d45-2985-42cc-91b9-af9e5da3bac5", version=1)
+    verify_request_count(test_id, "DELETE", "/v0/evi/prompts/af699d45-2985-42cc-91b9-af9e5da3bac5/version/1", None, 1)
+
+
+def test_empathicVoice_prompts_update_prompt_description() -> None:
+    """Test update-prompt-description endpoint with WireMock"""
+    test_id = "empathic_voice.prompts.update_prompt_description.0"
+    client = get_client(test_id)
+    client.empathic_voice.prompts.update_prompt_description(
+        id="af699d45-2985-42cc-91b9-af9e5da3bac5",
+        version=1,
+        version_description="This is an updated version_description.",
+    )
+    verify_request_count(test_id, "PATCH", "/v0/evi/prompts/af699d45-2985-42cc-91b9-af9e5da3bac5/version/1", None, 1)
diff --git a/tests/wire/test_empathicVoice_tools.py b/tests/wire/test_empathicVoice_tools.py
new file mode 100644
index 00000000..d96cfc3a
--- /dev/null
+++ b/tests/wire/test_empathicVoice_tools.py
@@ -0,0 +1,91 @@
+from .conftest import get_client, verify_request_count
+
+
+def test_empathicVoice_tools_list_tools() -> None:
+    """Test list-tools endpoint with WireMock"""
+    test_id = "empathic_voice.tools.list_tools.0"
+    client = get_client(test_id)
+    client.empathic_voice.tools.list_tools(page_number=0, page_size=2)
+    verify_request_count(test_id, "GET", "/v0/evi/tools", {"page_number": "0", "page_size": "2"}, 1)
+
+
+def test_empathicVoice_tools_create_tool() -> None:
+    """Test create-tool endpoint with WireMock"""
+    test_id = "empathic_voice.tools.create_tool.0"
+    client = get_client(test_id)
+    client.empathic_voice.tools.create_tool(
+        name="get_current_weather",
+        parameters='{ "type": "object", "properties": { "location": { "type": "string", "description": "The city and state, e.g. San Francisco, CA" }, "format": { "type": "string", "enum": ["celsius", "fahrenheit"], "description": "The temperature unit to use. Infer this from the users location." } }, "required": ["location", "format"] }',
+        version_description="Fetches current weather and uses celsius or fahrenheit based on location of user.",
+        description="This tool is for getting the current weather.",
+        fallback_content="Unable to fetch current weather.",
+    )
+    verify_request_count(test_id, "POST", "/v0/evi/tools", None, 1)
+
+
+def test_empathicVoice_tools_list_tool_versions() -> None:
+    """Test list-tool-versions endpoint with WireMock"""
+    test_id = "empathic_voice.tools.list_tool_versions.0"
+    client = get_client(test_id)
+    client.empathic_voice.tools.list_tool_versions(id="00183a3f-79ba-413d-9f3b-609864268bea")
+    verify_request_count(test_id, "GET", "/v0/evi/tools/00183a3f-79ba-413d-9f3b-609864268bea", None, 1)
+
+
+def test_empathicVoice_tools_create_tool_version() -> None:
+    """Test create-tool-version endpoint with WireMock"""
+    test_id = "empathic_voice.tools.create_tool_version.0"
+    client = get_client(test_id)
+    client.empathic_voice.tools.create_tool_version(
+        id="00183a3f-79ba-413d-9f3b-609864268bea",
+        parameters='{ "type": "object", "properties": { "location": { "type": "string", "description": "The city and state, e.g. San Francisco, CA" }, "format": { "type": "string", "enum": ["celsius", "fahrenheit", "kelvin"], "description": "The temperature unit to use. Infer this from the users location." } }, "required": ["location", "format"] }',
+        version_description="Fetches current weather and uses celsius, fahrenheit, or kelvin based on location of user.",
+        fallback_content="Unable to fetch current weather.",
+        description="This tool is for getting the current weather.",
+    )
+    verify_request_count(test_id, "POST", "/v0/evi/tools/00183a3f-79ba-413d-9f3b-609864268bea", None, 1)
+
+
+def test_empathicVoice_tools_delete_tool() -> None:
+    """Test delete-tool endpoint with WireMock"""
+    test_id = "empathic_voice.tools.delete_tool.0"
+    client = get_client(test_id)
+    client.empathic_voice.tools.delete_tool(id="00183a3f-79ba-413d-9f3b-609864268bea")
+    verify_request_count(test_id, "DELETE", "/v0/evi/tools/00183a3f-79ba-413d-9f3b-609864268bea", None, 1)
+
+
+def test_empathicVoice_tools_update_tool_name() -> None:
+    """Test update-tool-name endpoint with WireMock"""
+    test_id = "empathic_voice.tools.update_tool_name.0"
+    client = get_client(test_id)
+    client.empathic_voice.tools.update_tool_name(
+        id="00183a3f-79ba-413d-9f3b-609864268bea", name="get_current_temperature"
+    )
+    verify_request_count(test_id, "PATCH", "/v0/evi/tools/00183a3f-79ba-413d-9f3b-609864268bea", None, 1)
+
+
+def test_empathicVoice_tools_get_tool_version() -> None:
+    """Test get-tool-version endpoint with WireMock"""
+    test_id = "empathic_voice.tools.get_tool_version.0"
+    client = get_client(test_id)
+    client.empathic_voice.tools.get_tool_version(id="00183a3f-79ba-413d-9f3b-609864268bea", version=1)
+    verify_request_count(test_id, "GET", "/v0/evi/tools/00183a3f-79ba-413d-9f3b-609864268bea/version/1", None, 1)
+
+
+def test_empathicVoice_tools_delete_tool_version() -> None:
+    """Test delete-tool-version endpoint with WireMock"""
+    test_id = "empathic_voice.tools.delete_tool_version.0"
+    client = get_client(test_id)
+    client.empathic_voice.tools.delete_tool_version(id="00183a3f-79ba-413d-9f3b-609864268bea", version=1)
+    verify_request_count(test_id, "DELETE", "/v0/evi/tools/00183a3f-79ba-413d-9f3b-609864268bea/version/1", None, 1)
+
+
+def test_empathicVoice_tools_update_tool_description() -> None:
+    """Test update-tool-description endpoint with WireMock"""
+    test_id = "empathic_voice.tools.update_tool_description.0"
+    client = get_client(test_id)
+    client.empathic_voice.tools.update_tool_description(
+        id="00183a3f-79ba-413d-9f3b-609864268bea",
+        version=1,
+        version_description="Fetches current temperature, precipitation, wind speed, AQI, and other weather conditions. Uses Celsius, Fahrenheit, or kelvin depending on user's region.",
+    )
+    verify_request_count(test_id, "PATCH", "/v0/evi/tools/00183a3f-79ba-413d-9f3b-609864268bea/version/1", None, 1)
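The parameters argument in the tool tests above is a JSON Schema serialized to a string. Building that string with json.dumps avoids hand-escaping quotes and keeps the schema editable; the snippet below is an illustrative sketch that produces an equivalent payload:

import json

# The schema mirrors the inline string used in create_tool above.
weather_schema = {
    "type": "object",
    "properties": {
        "location": {"type": "string", "description": "The city and state, e.g. San Francisco, CA"},
        "format": {"type": "string", "enum": ["celsius", "fahrenheit"]},
    },
    "required": ["location", "format"],
}

# Serialize once and pass the string where create_tool expects `parameters`.
parameters = json.dumps(weather_schema)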
diff --git a/tests/wire/test_expressionMeasurement_batch.py b/tests/wire/test_expressionMeasurement_batch.py
new file mode 100644
index 00000000..f1860691
--- /dev/null
+++ b/tests/wire/test_expressionMeasurement_batch.py
@@ -0,0 +1,43 @@
+from .conftest import get_client, verify_request_count
+
+
+def test_expressionMeasurement_batch_list_jobs() -> None:
+    """Test list-jobs endpoint with WireMock"""
+    test_id = "expression_measurement.batch.list_jobs.0"
+    client = get_client(test_id)
+    client.expression_measurement.batch.list_jobs()
+    verify_request_count(test_id, "GET", "/v0/batch/jobs", None, 1)
+
+
+def test_expressionMeasurement_batch_start_inference_job() -> None:
+    """Test start-inference-job endpoint with WireMock"""
+    test_id = "expression_measurement.batch.start_inference_job.0"
+    client = get_client(test_id)
+    client.expression_measurement.batch.start_inference_job(
+        urls=["https://hume-tutorials.s3.amazonaws.com/faces.zip"], notify=True
+    )
+    verify_request_count(test_id, "POST", "/v0/batch/jobs", None, 1)
+
+
+def test_expressionMeasurement_batch_get_job_details() -> None:
+    """Test get-job-details endpoint with WireMock"""
+    test_id = "expression_measurement.batch.get_job_details.0"
+    client = get_client(test_id)
+    client.expression_measurement.batch.get_job_details(id="job_id")
+    verify_request_count(test_id, "GET", "/v0/batch/jobs/job_id", None, 1)
+
+
+def test_expressionMeasurement_batch_get_job_predictions() -> None:
+    """Test get-job-predictions endpoint with WireMock"""
+    test_id = "expression_measurement.batch.get_job_predictions.0"
+    client = get_client(test_id)
+    client.expression_measurement.batch.get_job_predictions(id="job_id")
+    verify_request_count(test_id, "GET", "/v0/batch/jobs/job_id/predictions", None, 1)
+
+
+def test_expressionMeasurement_batch_start_inference_job_from_local_file() -> None:
+    """Test start-inference-job-from-local-file endpoint with WireMock"""
+    test_id = "expression_measurement.batch.start_inference_job_from_local_file.0"
+    client = get_client(test_id)
+    client.expression_measurement.batch.start_inference_job()
+    verify_request_count(test_id, "POST", "/v0/batch/jobs", None, 1)
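Against the live API (rather than WireMock), batch inference is asynchronous: you start a job, poll its details until it reaches a terminal state, then fetch predictions. The loop below is a sketch; that start_inference_job returns the new job id and that job details expose a state/status pair are assumptions about the response models, so adapt the attribute access to the SDK's actual types:

import time

from hume.client import HumeClient

client = HumeClient(api_key="YOUR_API_KEY")
# Assumption: start_inference_job returns the new job id as a string.
job_id = client.expression_measurement.batch.start_inference_job(
    urls=["https://hume-tutorials.s3.amazonaws.com/faces.zip"]
)
for _ in range(30):
    details = client.expression_measurement.batch.get_job_details(id=job_id)
    # Assumed terminal-state check; verify the field names against the SDK models.
    if getattr(getattr(details, "state", None), "status", None) in ("COMPLETED", "FAILED"):
        break
    time.sleep(2)
predictions = client.expression_measurement.batch.get_job_predictions(id=job_id)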
quality.", + } + ], + ) + verify_request_count(test_id, "POST", "/v0/tts", None, 1) + + +def test_tts_synthesize_file() -> None: + """Test synthesize-file endpoint with WireMock""" + test_id = "tts.synthesize_file.0" + client = get_client(test_id) + # Must consume the iterator to trigger the HTTP request + list(client.tts.synthesize_file( + context={"generation_id": "09ad914d-8e7f-40f8-a279-e34f07f7dab2"}, + format={"type": "mp3"}, + num_generations=1, + utterances=[ + { + "text": "Beauty is no quality in things themselves: It exists merely in the mind which contemplates them.", + "description": "Middle-aged masculine voice with a clear, rhythmic Scots lilt, rounded vowels, and a warm, steady tone with an articulate, academic quality.", + } + ], + )) + verify_request_count(test_id, "POST", "/v0/tts/file", None, 1) + + +def test_tts_synthesize_file_streaming() -> None: + """Test synthesize-file-streaming endpoint with WireMock""" + test_id = "tts.synthesize_file_streaming.0" + client = get_client(test_id) + # Must consume the iterator to trigger the HTTP request + list(client.tts.synthesize_file_streaming( + utterances=[ + { + "text": "Beauty is no quality in things themselves: It exists merely in the mind which contemplates them.", + "voice": {"provider": "HUME_AI"}, + } + ] + )) + verify_request_count(test_id, "POST", "/v0/tts/stream/file", None, 1) + + +def test_tts_synthesize_json_streaming() -> None: + """Test synthesize-json-streaming endpoint with WireMock""" + test_id = "tts.synthesize_json_streaming.0" + client = get_client(test_id) + # Must consume the iterator to trigger the HTTP request + list(client.tts.synthesize_json_streaming( + utterances=[ + { + "text": "Beauty is no quality in things themselves: It exists merely in the mind which contemplates them.", + "voice": {"provider": "HUME_AI"}, + } + ] + )) + verify_request_count(test_id, "POST", "/v0/tts/stream/json", None, 1) + + +def test_tts_convert_voice_json() -> None: + """Test convertVoiceJson endpoint with WireMock""" + test_id = "tts.convert_voice_json.0" + client = get_client(test_id) + # Must consume the iterator to trigger the HTTP request + list(client.tts.convert_voice_json(audio="example_audio")) + verify_request_count(test_id, "POST", "/v0/tts/voice_conversion/json", None, 1) diff --git a/tests/wire/test_tts_voices.py b/tests/wire/test_tts_voices.py new file mode 100644 index 00000000..5b8b6c45 --- /dev/null +++ b/tests/wire/test_tts_voices.py @@ -0,0 +1,25 @@ +from .conftest import get_client, verify_request_count + + +def test_tts_voices_list_() -> None: + """Test list endpoint with WireMock""" + test_id = "tts.voices.list_.0" + client = get_client(test_id) + client.tts.voices.list(provider="CUSTOM_VOICE") + verify_request_count(test_id, "GET", "/v0/tts/voices", {"provider": "CUSTOM_VOICE"}, 1) + + +def test_tts_voices_create() -> None: + """Test create endpoint with WireMock""" + test_id = "tts.voices.create.0" + client = get_client(test_id) + client.tts.voices.create(generation_id="795c949a-1510-4a80-9646-7d0863b023ab", name="David Hume") + verify_request_count(test_id, "POST", "/v0/tts/voices", None, 1) + + +def test_tts_voices_delete() -> None: + """Test delete endpoint with WireMock""" + test_id = "tts.voices.delete.0" + client = get_client(test_id) + client.tts.voices.delete(name="David Hume") + verify_request_count(test_id, "DELETE", "/v0/tts/voices", {"name": "David Hume"}, 1) diff --git a/wiremock/docker-compose.test.yml b/wiremock/docker-compose.test.yml new file mode 100644 index 00000000..f80c6b0a 
diff --git a/wiremock/docker-compose.test.yml b/wiremock/docker-compose.test.yml
new file mode 100644
index 00000000..f80c6b0a
--- /dev/null
+++ b/wiremock/docker-compose.test.yml
@@ -0,0 +1,14 @@
+services:
+  wiremock:
+    image: wiremock/wiremock:3.9.1
+    ports:
+      - "8080:8080"
+    volumes:
+      - ./wiremock-mappings.json:/home/wiremock/mappings/wiremock-mappings.json
+    command: ["--global-response-templating", "--verbose"]
+    healthcheck:
+      test: ["CMD", "curl", "-f", "http://localhost:8080/__admin/health"]
+      interval: 2s
+      timeout: 5s
+      retries: 15
+      start_period: 5s
diff --git a/wiremock/wiremock-mappings.json b/wiremock/wiremock-mappings.json
new file mode 100644
index 00000000..b4ee6ce6
--- /dev/null
+++ b/wiremock/wiremock-mappings.json
@@ -0,0 +1 @@
+{"mappings":[{"id":"1d60b8ea-f512-4ce0-92ad-0a086a4717a2","name":"List voices - default","request":{"urlPathTemplate":"/v0/tts/voices","method":"GET"},"response":{"status":200,"body":"{\n \"page_number\": 0,\n \"page_size\": 10,\n \"total_pages\": 1,\n \"voices_page\": [\n {\n \"id\": \"c42352c0-4566-455d-b180-0f654b65b525\",\n \"name\": \"David Hume\",\n \"provider\": \"CUSTOM_VOICE\"\n },\n {\n \"id\": \"d87352b0-26a3-4b11-081b-d157a5674d19\",\n \"name\": \"Goliath Hume\",\n \"provider\": \"CUSTOM_VOICE\"\n }\n ]\n}","headers":{"Content-Type":"application/json"}},"uuid":"1d60b8ea-f512-4ce0-92ad-0a086a4717a2","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}},"postServeActions":[]},{"id":"2c434990-d686-4fec-940d-7b86327bf9d5","name":"Create voice - default","request":{"urlPathTemplate":"/v0/tts/voices","method":"POST"},"response":{"status":200,"body":"{\n \"id\": \"c42352c0-4566-455d-b180-0f654b65b525\",\n \"name\": \"David Hume\",\n \"provider\": \"CUSTOM_VOICE\"\n}","headers":{"Content-Type":"application/json"}},"uuid":"2c434990-d686-4fec-940d-7b86327bf9d5","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"69163bef-50c2-4c89-b4f9-70f5a04bfec8","name":"Delete voice - default","request":{"urlPathTemplate":"/v0/tts/voices","method":"DELETE"},"response":{"status":200,"body":"\"\"","headers":{"Content-Type":"application/json"}},"uuid":"69163bef-50c2-4c89-b4f9-70f5a04bfec8","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"1a9089ce-a462-43bb-afa5-ed028dd296e5","name":"Text-to-Speech (Json) - default","request":{"urlPathTemplate":"/v0/tts","method":"POST"},"response":{"status":200,"body":"{\n \"generations\": [\n {\n \"audio\": \"//PExAA0DDYRvkpNfhv3JI5JZ...etc.\",\n \"duration\": 7.44225,\n \"encoding\": {\n \"format\": \"mp3\",\n \"sample_rate\": 48000\n },\n \"file_size\": 120192,\n \"generation_id\": \"795c949a-1510-4a80-9646-7d0863b023ab\",\n \"snippets\": [\n [\n {\n \"audio\": \"//PExAA0DDYRvkpNfhv3JI5JZ...etc.\",\n \"generation_id\": \"795c949a-1510-4a80-9646-7d0863b023ab\",\n \"id\": \"37b1b1b1-1b1b-1b1b-1b1b-1b1b1b1b1b1b\",\n \"text\": \"Beauty is no quality in things themselves: It exists merely in the mind which contemplates them.\",\n \"utterance_index\": 0,\n \"timestamps\": []\n }\n ]\n ]\n }\n ],\n \"request_id\": \"66e01f90-4501-4aa0-bbaf-74f45dc15aa725906\"\n}","headers":{"Content-Type":"application/json"}},"uuid":"1a9089ce-a462-43bb-afa5-ed028dd296e5","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"449bb149-6027-4735-a265-0a0a5bc0d0ef","name":"Text-to-Speech (File) - 
default","request":{"urlPathTemplate":"/v0/tts/file","method":"POST"},"response":{"status":200,"body":"\"\"","headers":{"Content-Type":"application/json"}},"uuid":"449bb149-6027-4735-a265-0a0a5bc0d0ef","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"fd6bbe04-a38a-4d6e-bc3a-f8cf25725dbe","name":"Text-to-Speech (Streamed File) - default","request":{"urlPathTemplate":"/v0/tts/stream/file","method":"POST"},"response":{"status":200,"body":"\"\"","headers":{"Content-Type":"application/json"}},"uuid":"fd6bbe04-a38a-4d6e-bc3a-f8cf25725dbe","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"8e8e8262-38fd-4f81-836b-c9d7ee07cd84","name":"Text-to-Speech (Streamed JSON) - default","request":{"urlPathTemplate":"/v0/tts/stream/json","method":"POST"},"response":{"status":200,"body":"\"\"","headers":{"Content-Type":"application/json"}},"uuid":"8e8e8262-38fd-4f81-836b-c9d7ee07cd84","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"f75e0a57-c1d9-44df-80c7-c8485a7a76d6","name":"Voice Conversion (Streamed JSON) - default","request":{"urlPathTemplate":"/v0/tts/voice_conversion/json","method":"POST"},"response":{"status":200,"body":"\"\"","headers":{"Content-Type":"application/json"}},"uuid":"f75e0a57-c1d9-44df-80c7-c8485a7a76d6","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"e0fc3f32-35c4-4c49-89f2-eaa4e21b9444","name":"Send Message - default","request":{"urlPathTemplate":"/v0/evi/chat/{chat_id}/send","method":"POST","pathParameters":{"chat_id":{"equalTo":"chat_id"}}},"response":{"status":200,"body":"\"\"","headers":{"Content-Type":"application/json"}},"uuid":"e0fc3f32-35c4-4c49-89f2-eaa4e21b9444","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"7e5b16a7-b7b9-4e7d-b0a9-61b1ac4b5f7d","name":"List chat_groups - default","request":{"urlPathTemplate":"/v0/evi/chat_groups","method":"GET"},"response":{"status":200,"body":"{\n \"page_number\": 0,\n \"page_size\": 1,\n \"total_pages\": 1,\n \"pagination_direction\": \"ASC\",\n \"chat_groups_page\": [\n {\n \"id\": \"697056f0-6c7e-487d-9bd8-9c19df79f05f\",\n \"first_start_timestamp\": 1721844196397,\n \"most_recent_start_timestamp\": 1721861821717,\n \"active\": false,\n \"most_recent_chat_id\": \"dfdbdd4d-0ddf-418b-8fc4-80a266579d36\",\n \"num_chats\": 5\n }\n ]\n}","headers":{"Content-Type":"application/json"}},"uuid":"7e5b16a7-b7b9-4e7d-b0a9-61b1ac4b5f7d","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}},"postServeActions":[]},{"id":"52c3c012-9681-44fd-b0dd-d644aac44f8c","name":"Get chat_group - default","request":{"urlPathTemplate":"/v0/evi/chat_groups/{id}","method":"GET","pathParameters":{"id":{"equalTo":"697056f0-6c7e-487d-9bd8-9c19df79f05f"}}},"response":{"status":200,"body":"{\n \"id\": \"369846cf-6ad5-404d-905e-a8acb5cdfc78\",\n \"first_start_timestamp\": 1712334213647,\n \"most_recent_start_timestamp\": 1712334213647,\n \"num_chats\": 1,\n \"page_number\": 0,\n \"page_size\": 1,\n \"total_pages\": 1,\n \"pagination_direction\": \"ASC\",\n \"chats_page\": [\n {\n \"id\": \"6375d4f8-cd3e-4d6b-b13b-ace66b7c8aaa\",\n \"chat_group_id\": \"369846cf-6ad5-404d-905e-a8acb5cdfc78\",\n \"status\": \"USER_ENDED\",\n \"start_timestamp\": 
1712334213647,\n \"end_timestamp\": 1712334332571,\n \"event_count\": 0,\n \"metadata\": null,\n \"config\": null\n }\n ],\n \"active\": false\n}","headers":{"Content-Type":"application/json"}},"uuid":"52c3c012-9681-44fd-b0dd-d644aac44f8c","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"ef5bd433-148e-481b-b653-7b4676a3fbbb","name":"Get chat group audio - default","request":{"urlPathTemplate":"/v0/evi/chat_groups/{id}/audio","method":"GET","pathParameters":{"id":{"equalTo":"369846cf-6ad5-404d-905e-a8acb5cdfc78"}}},"response":{"status":200,"body":"{\n \"id\": \"369846cf-6ad5-404d-905e-a8acb5cdfc78\",\n \"user_id\": \"e6235940-cfda-3988-9147-ff531627cf42\",\n \"num_chats\": 1,\n \"page_number\": 0,\n \"page_size\": 10,\n \"total_pages\": 1,\n \"pagination_direction\": \"ASC\",\n \"audio_reconstructions_page\": [\n {\n \"id\": \"470a49f6-1dec-4afe-8b61-035d3b2d63b0\",\n \"user_id\": \"e6235940-cfda-3988-9147-ff531627cf42\",\n \"status\": \"COMPLETE\",\n \"filename\": \"e6235940-cfda-3988-9147-ff531627cf42/470a49f6-1dec-4afe-8b61-035d3b2d63b0/reconstructed_audio.mp4\",\n \"modified_at\": 1729875432555,\n \"signed_audio_url\": \"https://storage.googleapis.com/...etc.\",\n \"signed_url_expiration_timestamp_millis\": 1730232816964\n }\n ]\n}","headers":{"Content-Type":"application/json"}},"uuid":"ef5bd433-148e-481b-b653-7b4676a3fbbb","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"63f657bf-ebac-4bc4-bbae-5a46d2babec4","name":"List chat events from a specific chat_group - default","request":{"urlPathTemplate":"/v0/evi/chat_groups/{id}/events","method":"GET","pathParameters":{"id":{"equalTo":"697056f0-6c7e-487d-9bd8-9c19df79f05f"}}},"response":{"status":200,"body":"{\n \"id\": \"697056f0-6c7e-487d-9bd8-9c19df79f05f\",\n \"page_number\": 0,\n \"page_size\": 3,\n \"total_pages\": 1,\n \"pagination_direction\": \"ASC\",\n \"events_page\": [\n {\n \"id\": \"5d44bdbb-49a3-40fb-871d-32bf7e76efe7\",\n \"chat_id\": \"470a49f6-1dec-4afe-8b61-035d3b2d63b0\",\n \"timestamp\": 1716244940762,\n \"role\": \"SYSTEM\",\n \"type\": \"SYSTEM_PROMPT\",\n \"message_text\": \"You are an AI weather assistant providing users with accurate and up-to-date weather information. Respond to user queries concisely and clearly. Use simple language and avoid technical jargon. Provide temperature, precipitation, wind conditions, and any weather alerts. 
Include helpful tips if severe weather is expected.\",\n \"emotion_features\": \"\",\n \"metadata\": \"\"\n },\n {\n \"id\": \"5976ddf6-d093-4bb9-ba60-8f6c25832dde\",\n \"chat_id\": \"470a49f6-1dec-4afe-8b61-035d3b2d63b0\",\n \"timestamp\": 1716244956278,\n \"role\": \"USER\",\n \"type\": \"USER_MESSAGE\",\n \"message_text\": \"Hello.\",\n \"emotion_features\": \"{\\\"Admiration\\\": 0.09906005859375, \\\"Adoration\\\": 0.12213134765625, \\\"Aesthetic Appreciation\\\": 0.05035400390625, \\\"Amusement\\\": 0.16552734375, \\\"Anger\\\": 0.0037384033203125, \\\"Anxiety\\\": 0.010101318359375, \\\"Awe\\\": 0.058197021484375, \\\"Awkwardness\\\": 0.10552978515625, \\\"Boredom\\\": 0.1141357421875, \\\"Calmness\\\": 0.115234375, \\\"Concentration\\\": 0.00444793701171875, \\\"Confusion\\\": 0.0343017578125, \\\"Contemplation\\\": 0.00812530517578125, \\\"Contempt\\\": 0.009002685546875, \\\"Contentment\\\": 0.087158203125, \\\"Craving\\\": 0.00818634033203125, \\\"Desire\\\": 0.018310546875, \\\"Determination\\\": 0.003238677978515625, \\\"Disappointment\\\": 0.024169921875, \\\"Disgust\\\": 0.00702667236328125, \\\"Distress\\\": 0.00936126708984375, \\\"Doubt\\\": 0.00632476806640625, \\\"Ecstasy\\\": 0.0293731689453125, \\\"Embarrassment\\\": 0.01800537109375, \\\"Empathic Pain\\\": 0.0088348388671875, \\\"Entrancement\\\": 0.013397216796875, \\\"Envy\\\": 0.02557373046875, \\\"Excitement\\\": 0.12109375, \\\"Fear\\\": 0.004413604736328125, \\\"Guilt\\\": 0.016571044921875, \\\"Horror\\\": 0.00274658203125, \\\"Interest\\\": 0.2142333984375, \\\"Joy\\\": 0.29638671875, \\\"Love\\\": 0.16015625, \\\"Nostalgia\\\": 0.007843017578125, \\\"Pain\\\": 0.007160186767578125, \\\"Pride\\\": 0.00508880615234375, \\\"Realization\\\": 0.054229736328125, \\\"Relief\\\": 0.048736572265625, \\\"Romance\\\": 0.026397705078125, \\\"Sadness\\\": 0.0265350341796875, \\\"Satisfaction\\\": 0.051361083984375, \\\"Shame\\\": 0.00974273681640625, \\\"Surprise (negative)\\\": 0.0218963623046875, \\\"Surprise (positive)\\\": 0.216064453125, \\\"Sympathy\\\": 0.021728515625, \\\"Tiredness\\\": 0.0173797607421875, \\\"Triumph\\\": 0.004520416259765625}\",\n \"metadata\": \"{\\\"segments\\\": [{\\\"content\\\": \\\"Hello.\\\", \\\"embedding\\\": [0.6181640625, 0.1763916015625, -30.921875, 1.2705078125, 0.927734375, 0.63720703125, 2.865234375, 0.1080322265625, 0.2978515625, 1.0107421875, 1.34375, 0.74560546875, 0.416259765625, 0.99462890625, -0.333740234375, 0.361083984375, -1.388671875, 1.0107421875, 1.3173828125, 0.55615234375, 0.541015625, -0.1837158203125, 1.697265625, 0.228515625, 2.087890625, -0.311767578125, 0.053680419921875, 1.3349609375, 0.95068359375, 0.00441741943359375, 0.705078125, 1.8916015625, -0.939453125, 0.93701171875, -0.28955078125, 1.513671875, 0.5595703125, 1.0126953125, -0.1624755859375, 1.4072265625, -0.28857421875, -0.4560546875, -0.1500244140625, -0.1102294921875, -0.222412109375, 0.8779296875, 1.275390625, 1.6689453125, 0.80712890625, -0.34814453125, -0.325439453125, 0.412841796875, 0.81689453125, 0.55126953125, 1.671875, 0.6611328125, 0.7451171875, 1.50390625, 1.0224609375, -1.671875, 0.7373046875, 2.1328125, 2.166015625, 0.41015625, -0.127685546875, 1.9345703125, -4.2734375, 0.332275390625, 0.26171875, 0.76708984375, 0.2685546875, 0.468017578125, 1.208984375, -1.517578125, 1.083984375, 0.84814453125, 1.0244140625, -0.0072174072265625, 1.34375, 1.0712890625, 1.517578125, -0.52001953125, 0.59228515625, 0.8154296875, -0.951171875, -0.07757568359375, 1.3330078125, 1.125, 0.61181640625, 
1.494140625, 0.357421875, 1.1796875, 1.482421875, 0.8046875, 0.1536865234375, 1.8076171875, 0.68115234375, -15.171875, 1.2294921875, 0.319091796875, 0.499755859375, 1.5771484375, 0.94677734375, -0.2490234375, 0.88525390625, 3.47265625, 0.75927734375, 0.71044921875, 1.2333984375, 1.4169921875, -0.56640625, -1.8095703125, 1.37109375, 0.428955078125, 1.89453125, -0.39013671875, 0.1734619140625, 1.5595703125, -1.2294921875, 2.552734375, 0.58349609375, 0.2156982421875, -0.00984954833984375, -0.6865234375, -0.0272979736328125, -0.2264404296875, 2.853515625, 1.3896484375, 0.52978515625, 0.783203125, 3.0390625, 0.75537109375, 0.219970703125, 0.384521484375, 0.385986328125, 2.0546875, -0.10443115234375, 1.5146484375, 1.4296875, 1.9716796875, 1.1318359375, 0.31591796875, 0.338623046875, 1.654296875, -0.88037109375, -0.21484375, 1.45703125, 1.0380859375, -0.52294921875, -0.47802734375, 0.1650390625, 1.2392578125, -1.138671875, 0.56787109375, 1.318359375, 0.4287109375, 0.1981201171875, 2.4375, 0.281005859375, 0.89404296875, -0.1552734375, 0.6474609375, -0.08331298828125, 0.00740814208984375, -0.045501708984375, -0.578125, 2.02734375, 0.59228515625, 0.35693359375, 1.2919921875, 1.22265625, 1.0537109375, 0.145263671875, 1.05859375, -0.369140625, 0.207275390625, 0.78857421875, 0.599609375, 0.99072265625, 0.24462890625, 1.26953125, 0.08404541015625, 1.349609375, 0.73291015625, 1.3212890625, 0.388916015625, 1.0869140625, 0.9931640625, -1.5673828125, 0.0462646484375, 0.650390625, 0.253662109375, 0.58251953125, 1.8134765625, 0.8642578125, 2.591796875, 0.7314453125, 0.85986328125, 0.5615234375, 0.9296875, 0.04144287109375, 1.66015625, 1.99609375, 1.171875, 1.181640625, 1.5126953125, 0.0224456787109375, 0.58349609375, -1.4931640625, 0.81884765625, 0.732421875, -0.6455078125, -0.62451171875, 1.7802734375, 0.01526641845703125, -0.423095703125, 0.461669921875, 4.87890625, 1.2392578125, -0.6953125, 0.6689453125, 0.62451171875, -1.521484375, 1.7685546875, 0.810546875, 0.65478515625, 0.26123046875, 1.6396484375, 0.87548828125, 1.7353515625, 2.046875, 1.5634765625, 0.69384765625, 1.375, 0.8916015625, 1.0107421875, 0.1304931640625, 2.009765625, 0.06402587890625, -0.08428955078125, 0.04351806640625, -1.7529296875, 2.02734375, 3.521484375, 0.404541015625, 1.6337890625, -0.276611328125, 0.8837890625, -0.1287841796875, 0.91064453125, 0.8193359375, 0.701171875, 0.036529541015625, 1.26171875, 1.0478515625, -0.1422119140625, 1.0634765625, 0.61083984375, 1.3505859375, 1.208984375, 0.57275390625, 1.3623046875, 2.267578125, 0.484375, 0.9150390625, 0.56787109375, -0.70068359375, 0.27587890625, -0.70654296875, 0.8466796875, 0.57568359375, 1.6162109375, 0.87939453125, 2.248046875, -0.5458984375, 1.7744140625, 1.328125, 1.232421875, 0.6806640625, 0.9365234375, 1.052734375, -1.08984375, 1.8330078125, -0.4033203125, 1.0673828125, 0.297607421875, 1.5703125, 1.67578125, 1.34765625, 2.8203125, 2.025390625, -0.48583984375, 0.7626953125, 0.01007843017578125, 1.435546875, 0.007205963134765625, 0.05157470703125, -0.9853515625, 0.26708984375, 1.16796875, 1.2041015625, 1.99609375, -0.07916259765625, 1.244140625, -0.32080078125, 0.6748046875, 0.419921875, 1.3212890625, 1.291015625, 0.599609375, 0.0550537109375, 0.9599609375, 0.93505859375, 0.111083984375, 1.302734375, 0.0833740234375, 2.244140625, 1.25390625, 1.6015625, 0.58349609375, 1.7568359375, -0.263427734375, -0.019866943359375, -0.24658203125, -0.1871337890625, 0.927734375, 0.62255859375, 0.275146484375, 0.79541015625, 1.1796875, 1.1767578125, -0.26123046875, -0.268310546875, 
1.8994140625, 1.318359375, 2.1875, 0.2469482421875, 1.41015625, 0.03973388671875, 1.2685546875, 1.1025390625, 0.9560546875, 0.865234375, -1.92578125, 1.154296875, 0.389892578125, 1.130859375, 0.95947265625, 0.72314453125, 2.244140625, 0.048553466796875, 0.626953125, 0.42919921875, 0.82275390625, 0.311767578125, -0.320556640625, 0.01041412353515625, 0.1483154296875, 0.10809326171875, -0.3173828125, 1.1337890625, -0.8642578125, 1.4033203125, 0.048828125, 1.1787109375, 0.98779296875, 1.818359375, 1.1552734375, 0.6015625, 1.2392578125, -1.2685546875, 0.39208984375, 0.83251953125, 0.224365234375, 0.0019989013671875, 0.87548828125, 1.6572265625, 1.107421875, 0.434814453125, 1.8251953125, 0.442626953125, 1.2587890625, 0.09320068359375, -0.896484375, 1.8017578125, 1.451171875, -0.0755615234375, 0.6083984375, 2.06640625, 0.673828125, -0.33740234375, 0.192138671875, 0.21435546875, 0.80224609375, -1.490234375, 0.9501953125, 0.86083984375, -0.40283203125, 4.109375, 2.533203125, 1.2529296875, 0.8271484375, 0.225830078125, 1.0478515625, -1.9755859375, 0.841796875, 0.392822265625, 0.525390625, 0.33935546875, -0.79443359375, 0.71630859375, 0.97998046875, -0.175537109375, 0.97705078125, 1.705078125, 0.29638671875, 0.68359375, 0.54150390625, 0.435791015625, 0.99755859375, -0.369140625, 1.009765625, -0.140380859375, 0.426513671875, 0.189697265625, 1.8193359375, 1.1201171875, -0.5009765625, -0.331298828125, 0.759765625, -0.09442138671875, 0.74609375, -1.947265625, 1.3544921875, -3.935546875, 2.544921875, 1.359375, 0.1363525390625, 0.79296875, 0.79931640625, -0.3466796875, 1.1396484375, -0.33447265625, 2.0078125, -0.241455078125, 0.6318359375, 0.365234375, 0.296142578125, 0.830078125, 1.0458984375, 0.5830078125, 0.61572265625, 14.0703125, -2.0078125, -0.381591796875, 1.228515625, 0.08282470703125, -0.67822265625, -0.04339599609375, 0.397216796875, 0.1656494140625, 0.137451171875, 0.244873046875, 1.1611328125, -1.3818359375, 0.8447265625, 1.171875, 0.36328125, 0.252685546875, 0.1197509765625, 0.232177734375, -0.020172119140625, 0.64404296875, -0.01100921630859375, -1.9267578125, 0.222412109375, 0.56005859375, 1.3046875, 1.1630859375, 1.197265625, 1.02734375, 1.6806640625, -0.043731689453125, 1.4697265625, 0.81201171875, 1.5390625, 1.240234375, -0.7353515625, 1.828125, 1.115234375, 1.931640625, -0.517578125, 0.77880859375, 1.0546875, 0.95361328125, 3.42578125, 0.0160369873046875, 0.875, 0.56005859375, 1.2421875, 1.986328125, 1.4814453125, 0.0948486328125, 1.115234375, 0.00665283203125, 2.09375, 0.3544921875, -0.52783203125, 1.2099609375, 0.45068359375, 0.65625, 0.1112060546875, 1.0751953125, -0.9521484375, -0.30029296875, 1.4462890625, 2.046875, 3.212890625, 1.68359375, 1.07421875, -0.5263671875, 0.74560546875, 1.37890625, 0.15283203125, 0.2440185546875, 0.62646484375, -0.1280517578125, 0.7646484375, -0.515625, -0.35693359375, 1.2958984375, 0.96923828125, 0.58935546875, 1.3701171875, 1.0673828125, 0.2337646484375, 0.93115234375, 0.66357421875, 6.0, 1.1025390625, -0.51708984375, -0.38330078125, 0.7197265625, 0.246826171875, -0.45166015625, 1.9521484375, 0.5546875, 0.08807373046875, 0.18505859375, 0.8857421875, -0.57177734375, 0.251708984375, 0.234375, 2.57421875, 0.9599609375, 0.5029296875, 0.10382080078125, 0.08331298828125, 0.66748046875, -0.349609375, 1.287109375, 0.259765625, 2.015625, 2.828125, -0.3095703125, -0.164306640625, -0.3408203125, 0.486572265625, 0.8466796875, 1.9130859375, 0.09088134765625, 0.66552734375, 0.00972747802734375, -0.83154296875, 1.755859375, 0.654296875, 0.173828125, 0.27587890625, 
-0.47607421875, -0.264404296875, 0.7529296875, 0.6533203125, 0.7275390625, 0.499755859375, 0.833984375, -0.44775390625, -0.05078125, -0.454833984375, 0.75439453125, 0.68505859375, 0.210693359375, -0.283935546875, -0.53564453125, 0.96826171875, 0.861328125, -3.33984375, -0.26171875, 0.77734375, 0.26513671875, -0.14111328125, -0.042236328125, -0.84814453125, 0.2137451171875, 0.94921875, 0.65185546875, -0.5380859375, 0.1529541015625, -0.360595703125, -0.0333251953125, -0.69189453125, 0.8974609375, 0.7109375, 0.81494140625, -0.259521484375, 1.1904296875, 0.62158203125, 1.345703125, 0.89404296875, 0.70556640625, 1.0673828125, 1.392578125, 0.5068359375, 0.962890625, 0.736328125, 1.55078125, 0.50390625, -0.398681640625, 2.361328125, 0.345947265625, -0.61962890625, 0.330078125, 0.75439453125, -0.673828125, -0.2379150390625, 1.5673828125, 1.369140625, 0.1119384765625, -0.1834716796875, 1.4599609375, -0.77587890625, 0.5556640625, 0.09954833984375, 0.0285186767578125, 0.58935546875, -0.501953125, 0.212890625, 0.02679443359375, 0.1715087890625, 0.03466796875, -0.564453125, 2.029296875, 2.45703125, -0.72216796875, 2.138671875, 0.50830078125, -0.09356689453125, 0.230224609375, 1.6943359375, 1.5126953125, 0.39453125, 0.411376953125, 1.07421875, -0.8046875, 0.51416015625, 0.2271728515625, -0.283447265625, 0.38427734375, 0.73388671875, 0.6962890625, 1.4990234375, 0.02813720703125, 0.40478515625, 1.2451171875, 1.1162109375, -5.5703125, 0.76171875, 0.322021484375, 1.0361328125, 1.197265625, 0.1163330078125, 0.2425537109375, 1.5595703125, 1.5791015625, -0.0921630859375, 0.484619140625, 1.9052734375, 5.31640625, 1.6337890625, 0.95947265625, -0.1751708984375, 0.466552734375, 0.8330078125, 1.03125, 0.2044677734375, 0.31298828125, -1.1220703125, 0.5517578125, 0.93505859375, 0.45166015625, 1.951171875, 0.65478515625, 1.30859375, 1.0859375, 0.56494140625, 2.322265625, 0.242919921875, 1.81640625, -0.469970703125, -0.841796875, 0.90869140625, 1.5361328125, 0.923828125, 1.0595703125, 0.356689453125, -0.46142578125, 2.134765625, 1.3037109375, -0.32373046875, -9.2265625, 0.4521484375, 0.88037109375, -0.53955078125, 0.96484375, 0.7705078125, 0.84521484375, 1.580078125, -0.1448974609375, 0.7607421875, 1.0166015625, -0.086669921875, 1.611328125, 0.05938720703125, 0.5078125, 0.8427734375, 2.431640625, 0.66357421875, 3.203125, 0.132080078125, 0.461181640625, 0.779296875, 1.9482421875, 1.8720703125, 0.845703125, -1.3837890625, -0.138916015625, 0.35546875, 0.2457275390625, 0.75341796875, 1.828125, 1.4169921875, 0.60791015625, 1.0068359375, 1.109375, 0.484130859375, -0.302001953125, 0.4951171875, 0.802734375, 1.9482421875, 0.916015625, 0.1646728515625, 2.599609375, 1.7177734375, -0.2374267578125, 0.98046875, 0.39306640625, -1.1396484375, 1.6533203125, 0.375244140625], \\\"scores\\\": [0.09906005859375, 0.12213134765625, 0.05035400390625, 0.16552734375, 0.0037384033203125, 0.010101318359375, 0.058197021484375, 0.10552978515625, 0.1141357421875, 0.115234375, 0.00444793701171875, 0.00812530517578125, 0.0343017578125, 0.009002685546875, 0.087158203125, 0.00818634033203125, 0.003238677978515625, 0.024169921875, 0.00702667236328125, 0.00936126708984375, 0.00632476806640625, 0.0293731689453125, 0.01800537109375, 0.0088348388671875, 0.013397216796875, 0.02557373046875, 0.12109375, 0.004413604736328125, 0.016571044921875, 0.00274658203125, 0.2142333984375, 0.29638671875, 0.16015625, 0.007843017578125, 0.007160186767578125, 0.00508880615234375, 0.054229736328125, 0.048736572265625, 0.026397705078125, 0.0265350341796875, 
0.051361083984375, 0.018310546875, 0.00974273681640625, 0.0218963623046875, 0.216064453125, 0.021728515625, 0.0173797607421875, 0.004520416259765625], \\\"stoks\\\": [52, 52, 52, 52, 52, 41, 41, 374, 303, 303, 303, 427], \\\"time\\\": {\\\"begin_ms\\\": 640, \\\"end_ms\\\": 1140}}]}\"\n },\n {\n \"id\": \"7645a0d1-2e64-410d-83a8-b96040432e9a\",\n \"chat_id\": \"470a49f6-1dec-4afe-8b61-035d3b2d63b0\",\n \"timestamp\": 1716244957031,\n \"role\": \"AGENT\",\n \"type\": \"AGENT_MESSAGE\",\n \"message_text\": \"Hello!\",\n \"emotion_features\": \"{\\\"Admiration\\\": 0.044921875, \\\"Adoration\\\": 0.0253753662109375, \\\"Aesthetic Appreciation\\\": 0.03265380859375, \\\"Amusement\\\": 0.118408203125, \\\"Anger\\\": 0.06719970703125, \\\"Anxiety\\\": 0.0411376953125, \\\"Awe\\\": 0.03802490234375, \\\"Awkwardness\\\": 0.056549072265625, \\\"Boredom\\\": 0.04217529296875, \\\"Calmness\\\": 0.08709716796875, \\\"Concentration\\\": 0.070556640625, \\\"Confusion\\\": 0.06964111328125, \\\"Contemplation\\\": 0.0343017578125, \\\"Contempt\\\": 0.037689208984375, \\\"Contentment\\\": 0.059417724609375, \\\"Craving\\\": 0.01132965087890625, \\\"Desire\\\": 0.01406097412109375, \\\"Determination\\\": 0.1143798828125, \\\"Disappointment\\\": 0.051177978515625, \\\"Disgust\\\": 0.028594970703125, \\\"Distress\\\": 0.054901123046875, \\\"Doubt\\\": 0.04638671875, \\\"Ecstasy\\\": 0.0258026123046875, \\\"Embarrassment\\\": 0.0222015380859375, \\\"Empathic Pain\\\": 0.015777587890625, \\\"Entrancement\\\": 0.0160980224609375, \\\"Envy\\\": 0.0163421630859375, \\\"Excitement\\\": 0.129638671875, \\\"Fear\\\": 0.03125, \\\"Guilt\\\": 0.01483917236328125, \\\"Horror\\\": 0.0194549560546875, \\\"Interest\\\": 0.1341552734375, \\\"Joy\\\": 0.0738525390625, \\\"Love\\\": 0.0216522216796875, \\\"Nostalgia\\\": 0.0210418701171875, \\\"Pain\\\": 0.020721435546875, \\\"Pride\\\": 0.05499267578125, \\\"Realization\\\": 0.0728759765625, \\\"Relief\\\": 0.04052734375, \\\"Romance\\\": 0.0129241943359375, \\\"Sadness\\\": 0.0254669189453125, \\\"Satisfaction\\\": 0.07159423828125, \\\"Shame\\\": 0.01495361328125, \\\"Surprise (negative)\\\": 0.05560302734375, \\\"Surprise (positive)\\\": 0.07965087890625, \\\"Sympathy\\\": 0.022247314453125, \\\"Tiredness\\\": 0.0194549560546875, \\\"Triumph\\\": 0.04107666015625}\",\n \"metadata\": \"\"\n }\n ]\n}","headers":{"Content-Type":"application/json"}},"uuid":"63f657bf-ebac-4bc4-bbae-5a46d2babec4","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"741b2853-034c-43df-9eb0-7e4ff5d57dec","name":"List chats - default","request":{"urlPathTemplate":"/v0/evi/chats","method":"GET"},"response":{"status":200,"body":"{\n \"page_number\": 0,\n \"page_size\": 1,\n \"total_pages\": 1,\n \"pagination_direction\": \"ASC\",\n \"chats_page\": [\n {\n \"id\": \"470a49f6-1dec-4afe-8b61-035d3b2d63b0\",\n \"chat_group_id\": \"9fc18597-3567-42d5-94d6-935bde84bf2f\",\n \"status\": \"USER_ENDED\",\n \"start_timestamp\": 1716244940648,\n \"end_timestamp\": 1716244958546,\n \"event_count\": 3,\n \"metadata\": \"\",\n \"config\": {\n \"id\": \"1b60e1a0-cc59-424a-8d2c-189d354db3f3\",\n \"version\": 0\n }\n }\n ]\n}","headers":{"Content-Type":"application/json"}},"uuid":"741b2853-034c-43df-9eb0-7e4ff5d57dec","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}},"postServeActions":[]},{"id":"333ce95c-76c7-4621-aa72-bb0ed90fcf50","name":"List chat events - 
default","request":{"urlPathTemplate":"/v0/evi/chats/{id}","method":"GET","pathParameters":{"id":{"equalTo":"470a49f6-1dec-4afe-8b61-035d3b2d63b0"}}},"response":{"status":200,"body":"{\n \"id\": \"470a49f6-1dec-4afe-8b61-035d3b2d63b0\",\n \"chat_group_id\": \"9fc18597-3567-42d5-94d6-935bde84bf2f\",\n \"status\": \"USER_ENDED\",\n \"start_timestamp\": 1716244940648,\n \"pagination_direction\": \"ASC\",\n \"events_page\": [\n {\n \"id\": \"5d44bdbb-49a3-40fb-871d-32bf7e76efe7\",\n \"chat_id\": \"470a49f6-1dec-4afe-8b61-035d3b2d63b0\",\n \"timestamp\": 1716244940762,\n \"role\": \"SYSTEM\",\n \"type\": \"SYSTEM_PROMPT\",\n \"message_text\": \"You are an AI weather assistant providing users with accurate and up-to-date weather information. Respond to user queries concisely and clearly. Use simple language and avoid technical jargon. Provide temperature, precipitation, wind conditions, and any weather alerts. Include helpful tips if severe weather is expected.\",\n \"emotion_features\": \"\",\n \"metadata\": \"\"\n },\n {\n \"id\": \"5976ddf6-d093-4bb9-ba60-8f6c25832dde\",\n \"chat_id\": \"470a49f6-1dec-4afe-8b61-035d3b2d63b0\",\n \"timestamp\": 1716244956278,\n \"role\": \"USER\",\n \"type\": \"USER_MESSAGE\",\n \"message_text\": \"Hello.\",\n \"emotion_features\": \"{\\\"Admiration\\\": 0.09906005859375, \\\"Adoration\\\": 0.12213134765625, \\\"Aesthetic Appreciation\\\": 0.05035400390625, \\\"Amusement\\\": 0.16552734375, \\\"Anger\\\": 0.0037384033203125, \\\"Anxiety\\\": 0.010101318359375, \\\"Awe\\\": 0.058197021484375, \\\"Awkwardness\\\": 0.10552978515625, \\\"Boredom\\\": 0.1141357421875, \\\"Calmness\\\": 0.115234375, \\\"Concentration\\\": 0.00444793701171875, \\\"Confusion\\\": 0.0343017578125, \\\"Contemplation\\\": 0.00812530517578125, \\\"Contempt\\\": 0.009002685546875, \\\"Contentment\\\": 0.087158203125, \\\"Craving\\\": 0.00818634033203125, \\\"Desire\\\": 0.018310546875, \\\"Determination\\\": 0.003238677978515625, \\\"Disappointment\\\": 0.024169921875, \\\"Disgust\\\": 0.00702667236328125, \\\"Distress\\\": 0.00936126708984375, \\\"Doubt\\\": 0.00632476806640625, \\\"Ecstasy\\\": 0.0293731689453125, \\\"Embarrassment\\\": 0.01800537109375, \\\"Empathic Pain\\\": 0.0088348388671875, \\\"Entrancement\\\": 0.013397216796875, \\\"Envy\\\": 0.02557373046875, \\\"Excitement\\\": 0.12109375, \\\"Fear\\\": 0.004413604736328125, \\\"Guilt\\\": 0.016571044921875, \\\"Horror\\\": 0.00274658203125, \\\"Interest\\\": 0.2142333984375, \\\"Joy\\\": 0.29638671875, \\\"Love\\\": 0.16015625, \\\"Nostalgia\\\": 0.007843017578125, \\\"Pain\\\": 0.007160186767578125, \\\"Pride\\\": 0.00508880615234375, \\\"Realization\\\": 0.054229736328125, \\\"Relief\\\": 0.048736572265625, \\\"Romance\\\": 0.026397705078125, \\\"Sadness\\\": 0.0265350341796875, \\\"Satisfaction\\\": 0.051361083984375, \\\"Shame\\\": 0.00974273681640625, \\\"Surprise (negative)\\\": 0.0218963623046875, \\\"Surprise (positive)\\\": 0.216064453125, \\\"Sympathy\\\": 0.021728515625, \\\"Tiredness\\\": 0.0173797607421875, \\\"Triumph\\\": 0.004520416259765625}\",\n \"metadata\": \"{\\\"segments\\\": [{\\\"content\\\": \\\"Hello.\\\", \\\"embedding\\\": [0.6181640625, 0.1763916015625, -30.921875, 1.2705078125, 0.927734375, 0.63720703125, 2.865234375, 0.1080322265625, 0.2978515625, 1.0107421875, 1.34375, 0.74560546875, 0.416259765625, 0.99462890625, -0.333740234375, 0.361083984375, -1.388671875, 1.0107421875, 1.3173828125, 0.55615234375, 0.541015625, -0.1837158203125, 1.697265625, 0.228515625, 2.087890625, -0.311767578125, 
0.053680419921875, 1.3349609375, 0.95068359375, 0.00441741943359375, 0.705078125, 1.8916015625, -0.939453125, 0.93701171875, -0.28955078125, 1.513671875, 0.5595703125, 1.0126953125, -0.1624755859375, 1.4072265625, -0.28857421875, -0.4560546875, -0.1500244140625, -0.1102294921875, -0.222412109375, 0.8779296875, 1.275390625, 1.6689453125, 0.80712890625, -0.34814453125, -0.325439453125, 0.412841796875, 0.81689453125, 0.55126953125, 1.671875, 0.6611328125, 0.7451171875, 1.50390625, 1.0224609375, -1.671875, 0.7373046875, 2.1328125, 2.166015625, 0.41015625, -0.127685546875, 1.9345703125, -4.2734375, 0.332275390625, 0.26171875, 0.76708984375, 0.2685546875, 0.468017578125, 1.208984375, -1.517578125, 1.083984375, 0.84814453125, 1.0244140625, -0.0072174072265625, 1.34375, 1.0712890625, 1.517578125, -0.52001953125, 0.59228515625, 0.8154296875, -0.951171875, -0.07757568359375, 1.3330078125, 1.125, 0.61181640625, 1.494140625, 0.357421875, 1.1796875, 1.482421875, 0.8046875, 0.1536865234375, 1.8076171875, 0.68115234375, -15.171875, 1.2294921875, 0.319091796875, 0.499755859375, 1.5771484375, 0.94677734375, -0.2490234375, 0.88525390625, 3.47265625, 0.75927734375, 0.71044921875, 1.2333984375, 1.4169921875, -0.56640625, -1.8095703125, 1.37109375, 0.428955078125, 1.89453125, -0.39013671875, 0.1734619140625, 1.5595703125, -1.2294921875, 2.552734375, 0.58349609375, 0.2156982421875, -0.00984954833984375, -0.6865234375, -0.0272979736328125, -0.2264404296875, 2.853515625, 1.3896484375, 0.52978515625, 0.783203125, 3.0390625, 0.75537109375, 0.219970703125, 0.384521484375, 0.385986328125, 2.0546875, -0.10443115234375, 1.5146484375, 1.4296875, 1.9716796875, 1.1318359375, 0.31591796875, 0.338623046875, 1.654296875, -0.88037109375, -0.21484375, 1.45703125, 1.0380859375, -0.52294921875, -0.47802734375, 0.1650390625, 1.2392578125, -1.138671875, 0.56787109375, 1.318359375, 0.4287109375, 0.1981201171875, 2.4375, 0.281005859375, 0.89404296875, -0.1552734375, 0.6474609375, -0.08331298828125, 0.00740814208984375, -0.045501708984375, -0.578125, 2.02734375, 0.59228515625, 0.35693359375, 1.2919921875, 1.22265625, 1.0537109375, 0.145263671875, 1.05859375, -0.369140625, 0.207275390625, 0.78857421875, 0.599609375, 0.99072265625, 0.24462890625, 1.26953125, 0.08404541015625, 1.349609375, 0.73291015625, 1.3212890625, 0.388916015625, 1.0869140625, 0.9931640625, -1.5673828125, 0.0462646484375, 0.650390625, 0.253662109375, 0.58251953125, 1.8134765625, 0.8642578125, 2.591796875, 0.7314453125, 0.85986328125, 0.5615234375, 0.9296875, 0.04144287109375, 1.66015625, 1.99609375, 1.171875, 1.181640625, 1.5126953125, 0.0224456787109375, 0.58349609375, -1.4931640625, 0.81884765625, 0.732421875, -0.6455078125, -0.62451171875, 1.7802734375, 0.01526641845703125, -0.423095703125, 0.461669921875, 4.87890625, 1.2392578125, -0.6953125, 0.6689453125, 0.62451171875, -1.521484375, 1.7685546875, 0.810546875, 0.65478515625, 0.26123046875, 1.6396484375, 0.87548828125, 1.7353515625, 2.046875, 1.5634765625, 0.69384765625, 1.375, 0.8916015625, 1.0107421875, 0.1304931640625, 2.009765625, 0.06402587890625, -0.08428955078125, 0.04351806640625, -1.7529296875, 2.02734375, 3.521484375, 0.404541015625, 1.6337890625, -0.276611328125, 0.8837890625, -0.1287841796875, 0.91064453125, 0.8193359375, 0.701171875, 0.036529541015625, 1.26171875, 1.0478515625, -0.1422119140625, 1.0634765625, 0.61083984375, 1.3505859375, 1.208984375, 0.57275390625, 1.3623046875, 2.267578125, 0.484375, 0.9150390625, 0.56787109375, -0.70068359375, 0.27587890625, -0.70654296875, 0.8466796875, 
0.57568359375, 1.6162109375, 0.87939453125, 2.248046875, -0.5458984375, 1.7744140625, 1.328125, 1.232421875, 0.6806640625, 0.9365234375, 1.052734375, -1.08984375, 1.8330078125, -0.4033203125, 1.0673828125, 0.297607421875, 1.5703125, 1.67578125, 1.34765625, 2.8203125, 2.025390625, -0.48583984375, 0.7626953125, 0.01007843017578125, 1.435546875, 0.007205963134765625, 0.05157470703125, -0.9853515625, 0.26708984375, 1.16796875, 1.2041015625, 1.99609375, -0.07916259765625, 1.244140625, -0.32080078125, 0.6748046875, 0.419921875, 1.3212890625, 1.291015625, 0.599609375, 0.0550537109375, 0.9599609375, 0.93505859375, 0.111083984375, 1.302734375, 0.0833740234375, 2.244140625, 1.25390625, 1.6015625, 0.58349609375, 1.7568359375, -0.263427734375, -0.019866943359375, -0.24658203125, -0.1871337890625, 0.927734375, 0.62255859375, 0.275146484375, 0.79541015625, 1.1796875, 1.1767578125, -0.26123046875, -0.268310546875, 1.8994140625, 1.318359375, 2.1875, 0.2469482421875, 1.41015625, 0.03973388671875, 1.2685546875, 1.1025390625, 0.9560546875, 0.865234375, -1.92578125, 1.154296875, 0.389892578125, 1.130859375, 0.95947265625, 0.72314453125, 2.244140625, 0.048553466796875, 0.626953125, 0.42919921875, 0.82275390625, 0.311767578125, -0.320556640625, 0.01041412353515625, 0.1483154296875, 0.10809326171875, -0.3173828125, 1.1337890625, -0.8642578125, 1.4033203125, 0.048828125, 1.1787109375, 0.98779296875, 1.818359375, 1.1552734375, 0.6015625, 1.2392578125, -1.2685546875, 0.39208984375, 0.83251953125, 0.224365234375, 0.0019989013671875, 0.87548828125, 1.6572265625, 1.107421875, 0.434814453125, 1.8251953125, 0.442626953125, 1.2587890625, 0.09320068359375, -0.896484375, 1.8017578125, 1.451171875, -0.0755615234375, 0.6083984375, 2.06640625, 0.673828125, -0.33740234375, 0.192138671875, 0.21435546875, 0.80224609375, -1.490234375, 0.9501953125, 0.86083984375, -0.40283203125, 4.109375, 2.533203125, 1.2529296875, 0.8271484375, 0.225830078125, 1.0478515625, -1.9755859375, 0.841796875, 0.392822265625, 0.525390625, 0.33935546875, -0.79443359375, 0.71630859375, 0.97998046875, -0.175537109375, 0.97705078125, 1.705078125, 0.29638671875, 0.68359375, 0.54150390625, 0.435791015625, 0.99755859375, -0.369140625, 1.009765625, -0.140380859375, 0.426513671875, 0.189697265625, 1.8193359375, 1.1201171875, -0.5009765625, -0.331298828125, 0.759765625, -0.09442138671875, 0.74609375, -1.947265625, 1.3544921875, -3.935546875, 2.544921875, 1.359375, 0.1363525390625, 0.79296875, 0.79931640625, -0.3466796875, 1.1396484375, -0.33447265625, 2.0078125, -0.241455078125, 0.6318359375, 0.365234375, 0.296142578125, 0.830078125, 1.0458984375, 0.5830078125, 0.61572265625, 14.0703125, -2.0078125, -0.381591796875, 1.228515625, 0.08282470703125, -0.67822265625, -0.04339599609375, 0.397216796875, 0.1656494140625, 0.137451171875, 0.244873046875, 1.1611328125, -1.3818359375, 0.8447265625, 1.171875, 0.36328125, 0.252685546875, 0.1197509765625, 0.232177734375, -0.020172119140625, 0.64404296875, -0.01100921630859375, -1.9267578125, 0.222412109375, 0.56005859375, 1.3046875, 1.1630859375, 1.197265625, 1.02734375, 1.6806640625, -0.043731689453125, 1.4697265625, 0.81201171875, 1.5390625, 1.240234375, -0.7353515625, 1.828125, 1.115234375, 1.931640625, -0.517578125, 0.77880859375, 1.0546875, 0.95361328125, 3.42578125, 0.0160369873046875, 0.875, 0.56005859375, 1.2421875, 1.986328125, 1.4814453125, 0.0948486328125, 1.115234375, 0.00665283203125, 2.09375, 0.3544921875, -0.52783203125, 1.2099609375, 0.45068359375, 0.65625, 0.1112060546875, 1.0751953125, -0.9521484375, 
-0.30029296875, 1.4462890625, 2.046875, 3.212890625, 1.68359375, 1.07421875, -0.5263671875, 0.74560546875, 1.37890625, 0.15283203125, 0.2440185546875, 0.62646484375, -0.1280517578125, 0.7646484375, -0.515625, -0.35693359375, 1.2958984375, 0.96923828125, 0.58935546875, 1.3701171875, 1.0673828125, 0.2337646484375, 0.93115234375, 0.66357421875, 6.0, 1.1025390625, -0.51708984375, -0.38330078125, 0.7197265625, 0.246826171875, -0.45166015625, 1.9521484375, 0.5546875, 0.08807373046875, 0.18505859375, 0.8857421875, -0.57177734375, 0.251708984375, 0.234375, 2.57421875, 0.9599609375, 0.5029296875, 0.10382080078125, 0.08331298828125, 0.66748046875, -0.349609375, 1.287109375, 0.259765625, 2.015625, 2.828125, -0.3095703125, -0.164306640625, -0.3408203125, 0.486572265625, 0.8466796875, 1.9130859375, 0.09088134765625, 0.66552734375, 0.00972747802734375, -0.83154296875, 1.755859375, 0.654296875, 0.173828125, 0.27587890625, -0.47607421875, -0.264404296875, 0.7529296875, 0.6533203125, 0.7275390625, 0.499755859375, 0.833984375, -0.44775390625, -0.05078125, -0.454833984375, 0.75439453125, 0.68505859375, 0.210693359375, -0.283935546875, -0.53564453125, 0.96826171875, 0.861328125, -3.33984375, -0.26171875, 0.77734375, 0.26513671875, -0.14111328125, -0.042236328125, -0.84814453125, 0.2137451171875, 0.94921875, 0.65185546875, -0.5380859375, 0.1529541015625, -0.360595703125, -0.0333251953125, -0.69189453125, 0.8974609375, 0.7109375, 0.81494140625, -0.259521484375, 1.1904296875, 0.62158203125, 1.345703125, 0.89404296875, 0.70556640625, 1.0673828125, 1.392578125, 0.5068359375, 0.962890625, 0.736328125, 1.55078125, 0.50390625, -0.398681640625, 2.361328125, 0.345947265625, -0.61962890625, 0.330078125, 0.75439453125, -0.673828125, -0.2379150390625, 1.5673828125, 1.369140625, 0.1119384765625, -0.1834716796875, 1.4599609375, -0.77587890625, 0.5556640625, 0.09954833984375, 0.0285186767578125, 0.58935546875, -0.501953125, 0.212890625, 0.02679443359375, 0.1715087890625, 0.03466796875, -0.564453125, 2.029296875, 2.45703125, -0.72216796875, 2.138671875, 0.50830078125, -0.09356689453125, 0.230224609375, 1.6943359375, 1.5126953125, 0.39453125, 0.411376953125, 1.07421875, -0.8046875, 0.51416015625, 0.2271728515625, -0.283447265625, 0.38427734375, 0.73388671875, 0.6962890625, 1.4990234375, 0.02813720703125, 0.40478515625, 1.2451171875, 1.1162109375, -5.5703125, 0.76171875, 0.322021484375, 1.0361328125, 1.197265625, 0.1163330078125, 0.2425537109375, 1.5595703125, 1.5791015625, -0.0921630859375, 0.484619140625, 1.9052734375, 5.31640625, 1.6337890625, 0.95947265625, -0.1751708984375, 0.466552734375, 0.8330078125, 1.03125, 0.2044677734375, 0.31298828125, -1.1220703125, 0.5517578125, 0.93505859375, 0.45166015625, 1.951171875, 0.65478515625, 1.30859375, 1.0859375, 0.56494140625, 2.322265625, 0.242919921875, 1.81640625, -0.469970703125, -0.841796875, 0.90869140625, 1.5361328125, 0.923828125, 1.0595703125, 0.356689453125, -0.46142578125, 2.134765625, 1.3037109375, -0.32373046875, -9.2265625, 0.4521484375, 0.88037109375, -0.53955078125, 0.96484375, 0.7705078125, 0.84521484375, 1.580078125, -0.1448974609375, 0.7607421875, 1.0166015625, -0.086669921875, 1.611328125, 0.05938720703125, 0.5078125, 0.8427734375, 2.431640625, 0.66357421875, 3.203125, 0.132080078125, 0.461181640625, 0.779296875, 1.9482421875, 1.8720703125, 0.845703125, -1.3837890625, -0.138916015625, 0.35546875, 0.2457275390625, 0.75341796875, 1.828125, 1.4169921875, 0.60791015625, 1.0068359375, 1.109375, 0.484130859375, -0.302001953125, 0.4951171875, 0.802734375, 1.9482421875, 
0.916015625, 0.1646728515625, 2.599609375, 1.7177734375, -0.2374267578125, 0.98046875, 0.39306640625, -1.1396484375, 1.6533203125, 0.375244140625], \\\"scores\\\": [0.09906005859375, 0.12213134765625, 0.05035400390625, 0.16552734375, 0.0037384033203125, 0.010101318359375, 0.058197021484375, 0.10552978515625, 0.1141357421875, 0.115234375, 0.00444793701171875, 0.00812530517578125, 0.0343017578125, 0.009002685546875, 0.087158203125, 0.00818634033203125, 0.003238677978515625, 0.024169921875, 0.00702667236328125, 0.00936126708984375, 0.00632476806640625, 0.0293731689453125, 0.01800537109375, 0.0088348388671875, 0.013397216796875, 0.02557373046875, 0.12109375, 0.004413604736328125, 0.016571044921875, 0.00274658203125, 0.2142333984375, 0.29638671875, 0.16015625, 0.007843017578125, 0.007160186767578125, 0.00508880615234375, 0.054229736328125, 0.048736572265625, 0.026397705078125, 0.0265350341796875, 0.051361083984375, 0.018310546875, 0.00974273681640625, 0.0218963623046875, 0.216064453125, 0.021728515625, 0.0173797607421875, 0.004520416259765625], \\\"stoks\\\": [52, 52, 52, 52, 52, 41, 41, 374, 303, 303, 303, 427], \\\"time\\\": {\\\"begin_ms\\\": 640, \\\"end_ms\\\": 1140}}]}\"\n },\n {\n \"id\": \"7645a0d1-2e64-410d-83a8-b96040432e9a\",\n \"chat_id\": \"470a49f6-1dec-4afe-8b61-035d3b2d63b0\",\n \"timestamp\": 1716244957031,\n \"role\": \"AGENT\",\n \"type\": \"AGENT_MESSAGE\",\n \"message_text\": \"Hello!\",\n \"emotion_features\": \"{\\\"Admiration\\\": 0.044921875, \\\"Adoration\\\": 0.0253753662109375, \\\"Aesthetic Appreciation\\\": 0.03265380859375, \\\"Amusement\\\": 0.118408203125, \\\"Anger\\\": 0.06719970703125, \\\"Anxiety\\\": 0.0411376953125, \\\"Awe\\\": 0.03802490234375, \\\"Awkwardness\\\": 0.056549072265625, \\\"Boredom\\\": 0.04217529296875, \\\"Calmness\\\": 0.08709716796875, \\\"Concentration\\\": 0.070556640625, \\\"Confusion\\\": 0.06964111328125, \\\"Contemplation\\\": 0.0343017578125, \\\"Contempt\\\": 0.037689208984375, \\\"Contentment\\\": 0.059417724609375, \\\"Craving\\\": 0.01132965087890625, \\\"Desire\\\": 0.01406097412109375, \\\"Determination\\\": 0.1143798828125, \\\"Disappointment\\\": 0.051177978515625, \\\"Disgust\\\": 0.028594970703125, \\\"Distress\\\": 0.054901123046875, \\\"Doubt\\\": 0.04638671875, \\\"Ecstasy\\\": 0.0258026123046875, \\\"Embarrassment\\\": 0.0222015380859375, \\\"Empathic Pain\\\": 0.015777587890625, \\\"Entrancement\\\": 0.0160980224609375, \\\"Envy\\\": 0.0163421630859375, \\\"Excitement\\\": 0.129638671875, \\\"Fear\\\": 0.03125, \\\"Guilt\\\": 0.01483917236328125, \\\"Horror\\\": 0.0194549560546875, \\\"Interest\\\": 0.1341552734375, \\\"Joy\\\": 0.0738525390625, \\\"Love\\\": 0.0216522216796875, \\\"Nostalgia\\\": 0.0210418701171875, \\\"Pain\\\": 0.020721435546875, \\\"Pride\\\": 0.05499267578125, \\\"Realization\\\": 0.0728759765625, \\\"Relief\\\": 0.04052734375, \\\"Romance\\\": 0.0129241943359375, \\\"Sadness\\\": 0.0254669189453125, \\\"Satisfaction\\\": 0.07159423828125, \\\"Shame\\\": 0.01495361328125, \\\"Surprise (negative)\\\": 0.05560302734375, \\\"Surprise (positive)\\\": 0.07965087890625, \\\"Sympathy\\\": 0.022247314453125, \\\"Tiredness\\\": 0.0194549560546875, \\\"Triumph\\\": 0.04107666015625}\",\n \"metadata\": \"\"\n }\n ],\n \"page_number\": 0,\n \"page_size\": 3,\n \"total_pages\": 1,\n \"end_timestamp\": 1716244958546,\n \"metadata\": \"\",\n \"config\": {\n \"id\": \"1b60e1a0-cc59-424a-8d2c-189d354db3f3\",\n \"version\": 0\n 
}\n}","headers":{"Content-Type":"application/json"}},"uuid":"333ce95c-76c7-4621-aa72-bb0ed90fcf50","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"1c210e60-fc3e-4020-ba31-155c211461a5","name":"Get chat audio - default","request":{"urlPathTemplate":"/v0/evi/chats/{id}/audio","method":"GET","pathParameters":{"id":{"equalTo":"470a49f6-1dec-4afe-8b61-035d3b2d63b0"}}},"response":{"status":200,"body":"{\n \"id\": \"470a49f6-1dec-4afe-8b61-035d3b2d63b0\",\n \"user_id\": \"e6235940-cfda-3988-9147-ff531627cf42\",\n \"status\": \"COMPLETE\",\n \"filename\": \"e6235940-cfda-3988-9147-ff531627cf42/470a49f6-1dec-4afe-8b61-035d3b2d63b0/reconstructed_audio.mp4\",\n \"modified_at\": 1729875432555,\n \"signed_audio_url\": \"https://storage.googleapis.com/...etc.\",\n \"signed_url_expiration_timestamp_millis\": 1730232816964\n}","headers":{"Content-Type":"application/json"}},"uuid":"1c210e60-fc3e-4020-ba31-155c211461a5","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"95990ecd-a7f2-495c-84ec-e0b2f0f1e471","name":"List configs - default","request":{"urlPathTemplate":"/v0/evi/configs","method":"GET"},"response":{"status":200,"body":"{\n \"page_number\": 0,\n \"page_size\": 1,\n \"total_pages\": 1,\n \"configs_page\": [\n {\n \"id\": \"1b60e1a0-cc59-424a-8d2c-189d354db3f3\",\n \"version\": 0,\n \"version_description\": \"\",\n \"name\": \"Weather Assistant Config\",\n \"created_on\": 1715267200693,\n \"modified_on\": 1715267200693,\n \"evi_version\": \"3\",\n \"prompt\": {\n \"id\": \"af699d45-2985-42cc-91b9-af9e5da3bac5\",\n \"version\": 0,\n \"version_type\": \"FIXED\",\n \"version_description\": \"\",\n \"name\": \"Weather Assistant Prompt\",\n \"created_on\": 1715267200693,\n \"modified_on\": 1715267200693,\n \"text\": \"You are an AI weather assistant providing users with accurate and up-to-date weather information. Respond to user queries concisely and clearly. Use simple language and avoid technical jargon. Provide temperature, precipitation, wind conditions, and any weather alerts. 
Include helpful tips if severe weather is expected.\"\n },\n \"voice\": {\n \"provider\": \"HUME_AI\",\n \"name\": \"Ava Song\",\n \"id\": \"5bb7de05-c8fe-426a-8fcc-ba4fc4ce9f9c\"\n },\n \"language_model\": {\n \"model_provider\": \"ANTHROPIC\",\n \"model_resource\": \"claude-3-7-sonnet-latest\",\n \"temperature\": 1\n },\n \"ellm_model\": {\n \"allow_short_responses\": false\n },\n \"tools\": [],\n \"builtin_tools\": [],\n \"event_messages\": {\n \"on_new_chat\": {\n \"enabled\": false,\n \"text\": \"\"\n },\n \"on_inactivity_timeout\": {\n \"enabled\": false,\n \"text\": \"\"\n },\n \"on_max_duration_timeout\": {\n \"enabled\": false,\n \"text\": \"\"\n }\n },\n \"timeouts\": {\n \"inactivity\": {\n \"enabled\": true,\n \"duration_secs\": 600\n },\n \"max_duration\": {\n \"enabled\": true,\n \"duration_secs\": 1800\n }\n }\n }\n ]\n}","headers":{"Content-Type":"application/json"}},"uuid":"95990ecd-a7f2-495c-84ec-e0b2f0f1e471","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}},"postServeActions":[]},{"id":"73a53d69-e4fb-44c0-a4cc-3ebdee8e8c36","name":"Create config - default","request":{"urlPathTemplate":"/v0/evi/configs","method":"POST"},"response":{"status":201,"body":"{\n \"id\": \"1b60e1a0-cc59-424a-8d2c-189d354db3f3\",\n \"version\": 0,\n \"version_description\": \"\",\n \"name\": \"Weather Assistant Config\",\n \"created_on\": 1715275452390,\n \"modified_on\": 1715275452390,\n \"evi_version\": \"3\",\n \"prompt\": {\n \"id\": \"af699d45-2985-42cc-91b9-af9e5da3bac5\",\n \"version\": 0,\n \"version_type\": \"FIXED\",\n \"version_description\": \"\",\n \"name\": \"Weather Assistant Prompt\",\n \"created_on\": 1715267200693,\n \"modified_on\": 1715267200693,\n \"text\": \"You are an AI weather assistant providing users with accurate and up-to-date weather information. Respond to user queries concisely and clearly. Use simple language and avoid technical jargon. Provide temperature, precipitation, wind conditions, and any weather alerts. 
Include helpful tips if severe weather is expected.\"\n },\n \"voice\": {\n \"provider\": \"HUME_AI\",\n \"name\": \"Ava Song\",\n \"id\": \"5bb7de05-c8fe-426a-8fcc-ba4fc4ce9f9c\"\n },\n \"language_model\": {\n \"model_provider\": \"ANTHROPIC\",\n \"model_resource\": \"claude-3-7-sonnet-latest\",\n \"temperature\": 1\n },\n \"ellm_model\": {\n \"allow_short_responses\": false\n },\n \"tools\": [],\n \"builtin_tools\": [],\n \"event_messages\": {\n \"on_new_chat\": {\n \"enabled\": false,\n \"text\": \"\"\n },\n \"on_inactivity_timeout\": {\n \"enabled\": false,\n \"text\": \"\"\n },\n \"on_max_duration_timeout\": {\n \"enabled\": false,\n \"text\": \"\"\n }\n },\n \"timeouts\": {\n \"inactivity\": {\n \"enabled\": true,\n \"duration_secs\": 600\n },\n \"max_duration\": {\n \"enabled\": true,\n \"duration_secs\": 1800\n }\n }\n}","headers":{"Content-Type":"application/json"}},"uuid":"73a53d69-e4fb-44c0-a4cc-3ebdee8e8c36","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"9fa7c906-3213-4358-9fd3-fb98a80ccff9","name":"List config versions - default","request":{"urlPathTemplate":"/v0/evi/configs/{id}","method":"GET","pathParameters":{"id":{"equalTo":"1b60e1a0-cc59-424a-8d2c-189d354db3f3"}}},"response":{"status":200,"body":"{\n \"page_number\": 0,\n \"page_size\": 10,\n \"total_pages\": 1,\n \"configs_page\": [\n {\n \"id\": \"1b60e1a0-cc59-424a-8d2c-189d354db3f3\",\n \"version\": 0,\n \"version_description\": \"\",\n \"name\": \"Weather Assistant Config\",\n \"created_on\": 1715275452390,\n \"modified_on\": 1715275452390,\n \"evi_version\": \"3\",\n \"prompt\": {\n \"id\": \"af699d45-2985-42cc-91b9-af9e5da3bac5\",\n \"version\": 0,\n \"version_type\": \"FIXED\",\n \"version_description\": \"\",\n \"name\": \"Weather Assistant Prompt\",\n \"created_on\": 1715267200693,\n \"modified_on\": 1715267200693,\n \"text\": \"You are an AI weather assistant providing users with accurate and up-to-date weather information. Respond to user queries concisely and clearly. Use simple language and avoid technical jargon. Provide temperature, precipitation, wind conditions, and any weather alerts. 
Include helpful tips if severe weather is expected.\"\n },\n \"voice\": {\n \"provider\": \"HUME_AI\",\n \"name\": \"Ava Song\",\n \"id\": \"5bb7de05-c8fe-426a-8fcc-ba4fc4ce9f9c\"\n },\n \"language_model\": {\n \"model_provider\": \"ANTHROPIC\",\n \"model_resource\": \"claude-3-7-sonnet-latest\",\n \"temperature\": 1\n },\n \"ellm_model\": {\n \"allow_short_responses\": false\n },\n \"tools\": [],\n \"builtin_tools\": [],\n \"event_messages\": {\n \"on_new_chat\": {\n \"enabled\": false,\n \"text\": \"\"\n },\n \"on_inactivity_timeout\": {\n \"enabled\": false,\n \"text\": \"\"\n },\n \"on_max_duration_timeout\": {\n \"enabled\": false,\n \"text\": \"\"\n }\n },\n \"timeouts\": {\n \"inactivity\": {\n \"enabled\": true,\n \"duration_secs\": 600\n },\n \"max_duration\": {\n \"enabled\": true,\n \"duration_secs\": 1800\n }\n }\n }\n ]\n}","headers":{"Content-Type":"application/json"}},"uuid":"9fa7c906-3213-4358-9fd3-fb98a80ccff9","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"66129a1a-71df-4899-a702-a2582339ad92","name":"Create config version - default","request":{"urlPathTemplate":"/v0/evi/configs/{id}","method":"POST","pathParameters":{"id":{"equalTo":"1b60e1a0-cc59-424a-8d2c-189d354db3f3"}}},"response":{"status":201,"body":"{\n \"id\": \"1b60e1a0-cc59-424a-8d2c-189d354db3f3\",\n \"version\": 1,\n \"version_description\": \"This is an updated version of the Weather Assistant Config.\",\n \"name\": \"Weather Assistant Config\",\n \"created_on\": 1715275452390,\n \"modified_on\": 1722642242998,\n \"evi_version\": \"3\",\n \"prompt\": {\n \"id\": \"af699d45-2985-42cc-91b9-af9e5da3bac5\",\n \"version\": 0,\n \"version_type\": \"FIXED\",\n \"version_description\": \"\",\n \"name\": \"Weather Assistant Prompt\",\n \"created_on\": 1715267200693,\n \"modified_on\": 1715267200693,\n \"text\": \"You are an AI weather assistant providing users with accurate and up-to-date weather information. Respond to user queries concisely and clearly. Use simple language and avoid technical jargon. Provide temperature, precipitation, wind conditions, and any weather alerts. 
Include helpful tips if severe weather is expected.\"\n },\n \"voice\": {\n \"provider\": \"HUME_AI\",\n \"name\": \"Ava Song\",\n \"id\": \"5bb7de05-c8fe-426a-8fcc-ba4fc4ce9f9c\"\n },\n \"language_model\": {\n \"model_provider\": \"ANTHROPIC\",\n \"model_resource\": \"claude-3-7-sonnet-latest\",\n \"temperature\": 1\n },\n \"ellm_model\": {\n \"allow_short_responses\": true\n },\n \"tools\": [],\n \"builtin_tools\": [],\n \"event_messages\": {\n \"on_new_chat\": {\n \"enabled\": false,\n \"text\": \"\"\n },\n \"on_inactivity_timeout\": {\n \"enabled\": false,\n \"text\": \"\"\n },\n \"on_max_duration_timeout\": {\n \"enabled\": false,\n \"text\": \"\"\n }\n },\n \"timeouts\": {\n \"inactivity\": {\n \"enabled\": true,\n \"duration_secs\": 600\n },\n \"max_duration\": {\n \"enabled\": true,\n \"duration_secs\": 1800\n }\n }\n}","headers":{"Content-Type":"application/json"}},"uuid":"66129a1a-71df-4899-a702-a2582339ad92","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"96ab709e-8ce5-42ad-8930-11174405a07f","name":"Delete config - default","request":{"urlPathTemplate":"/v0/evi/configs/{id}","method":"DELETE","pathParameters":{"id":{"equalTo":"1b60e1a0-cc59-424a-8d2c-189d354db3f3"}}},"response":{"status":200,"body":"\"\"","headers":{"Content-Type":"application/json"}},"uuid":"96ab709e-8ce5-42ad-8930-11174405a07f","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"ac036d7f-9200-454a-ad8b-058cedce8018","name":"Update config name - default","request":{"urlPathTemplate":"/v0/evi/configs/{id}","method":"PATCH","pathParameters":{"id":{"equalTo":"1b60e1a0-cc59-424a-8d2c-189d354db3f3"}}},"response":{"status":200,"body":"\"\"","headers":{"Content-Type":"application/json"}},"uuid":"ac036d7f-9200-454a-ad8b-058cedce8018","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"336e4516-6577-4544-8e15-559222ec726d","name":"Get config version - default","request":{"urlPathTemplate":"/v0/evi/configs/{id}/version/{version}","method":"GET","pathParameters":{"id":{"equalTo":"1b60e1a0-cc59-424a-8d2c-189d354db3f3"},"version":{"equalTo":"1"}}},"response":{"status":200,"body":"{\n \"id\": \"1b60e1a0-cc59-424a-8d2c-189d354db3f3\",\n \"version\": 1,\n \"version_description\": \"\",\n \"name\": \"Weather Assistant Config\",\n \"created_on\": 1715275452390,\n \"modified_on\": 1715275452390,\n \"evi_version\": \"3\",\n \"prompt\": {\n \"id\": \"af699d45-2985-42cc-91b9-af9e5da3bac5\",\n \"version\": 0,\n \"version_type\": \"FIXED\",\n \"version_description\": \"\",\n \"name\": \"Weather Assistant Prompt\",\n \"created_on\": 1715267200693,\n \"modified_on\": 1715267200693,\n \"text\": \"You are an AI weather assistant providing users with accurate and up-to-date weather information. Respond to user queries concisely and clearly. Use simple language and avoid technical jargon. Provide temperature, precipitation, wind conditions, and any weather alerts. 
Include helpful tips if severe weather is expected.\"\n },\n \"voice\": {\n \"provider\": \"HUME_AI\",\n \"name\": \"Ava Song\",\n \"id\": \"5bb7de05-c8fe-426a-8fcc-ba4fc4ce9f9c\"\n },\n \"language_model\": {\n \"model_provider\": \"ANTHROPIC\",\n \"model_resource\": \"claude-3-7-sonnet-latest\",\n \"temperature\": 1\n },\n \"ellm_model\": {\n \"allow_short_responses\": false\n },\n \"tools\": [],\n \"builtin_tools\": [],\n \"event_messages\": {\n \"on_new_chat\": {\n \"enabled\": false,\n \"text\": \"\"\n },\n \"on_inactivity_timeout\": {\n \"enabled\": false,\n \"text\": \"\"\n },\n \"on_max_duration_timeout\": {\n \"enabled\": false,\n \"text\": \"\"\n }\n },\n \"timeouts\": {\n \"inactivity\": {\n \"enabled\": true,\n \"duration_secs\": 600\n },\n \"max_duration\": {\n \"enabled\": true,\n \"duration_secs\": 1800\n }\n }\n}","headers":{"Content-Type":"application/json"}},"uuid":"336e4516-6577-4544-8e15-559222ec726d","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"3631f032-af11-4788-9ada-63c97ec90d03","name":"Delete config version - default","request":{"urlPathTemplate":"/v0/evi/configs/{id}/version/{version}","method":"DELETE","pathParameters":{"id":{"equalTo":"1b60e1a0-cc59-424a-8d2c-189d354db3f3"},"version":{"equalTo":"1"}}},"response":{"status":200,"body":"\"\"","headers":{"Content-Type":"application/json"}},"uuid":"3631f032-af11-4788-9ada-63c97ec90d03","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"4a8a419e-4772-4077-95b5-281c2bd2851b","name":"Update config description - default","request":{"urlPathTemplate":"/v0/evi/configs/{id}/version/{version}","method":"PATCH","pathParameters":{"id":{"equalTo":"1b60e1a0-cc59-424a-8d2c-189d354db3f3"},"version":{"equalTo":"1"}}},"response":{"status":200,"body":"{\n \"id\": \"1b60e1a0-cc59-424a-8d2c-189d354db3f3\",\n \"version\": 1,\n \"version_description\": \"This is an updated version_description.\",\n \"name\": \"Weather Assistant Config\",\n \"created_on\": 1715275452390,\n \"modified_on\": 1715275452390,\n \"evi_version\": \"3\",\n \"prompt\": {\n \"id\": \"af699d45-2985-42cc-91b9-af9e5da3bac5\",\n \"version\": 0,\n \"version_type\": \"FIXED\",\n \"version_description\": \"\",\n \"name\": \"Weather Assistant Prompt\",\n \"created_on\": 1715267200693,\n \"modified_on\": 1715267200693,\n \"text\": \"You are an AI weather assistant providing users with accurate and up-to-date weather information. Respond to user queries concisely and clearly. Use simple language and avoid technical jargon. Provide temperature, precipitation, wind conditions, and any weather alerts. 
Include helpful tips if severe weather is expected.\"\n },\n \"voice\": {\n \"provider\": \"HUME_AI\",\n \"name\": \"Ava Song\",\n \"id\": \"5bb7de05-c8fe-426a-8fcc-ba4fc4ce9f9c\"\n },\n \"language_model\": {\n \"model_provider\": \"ANTHROPIC\",\n \"model_resource\": \"claude-3-7-sonnet-latest\",\n \"temperature\": 1\n },\n \"ellm_model\": {\n \"allow_short_responses\": false\n },\n \"tools\": [],\n \"builtin_tools\": [],\n \"event_messages\": {\n \"on_new_chat\": {\n \"enabled\": false,\n \"text\": \"\"\n },\n \"on_inactivity_timeout\": {\n \"enabled\": false,\n \"text\": \"\"\n },\n \"on_max_duration_timeout\": {\n \"enabled\": false,\n \"text\": \"\"\n }\n },\n \"timeouts\": {\n \"inactivity\": {\n \"enabled\": true,\n \"duration_secs\": 600\n },\n \"max_duration\": {\n \"enabled\": true,\n \"duration_secs\": 1800\n }\n }\n}","headers":{"Content-Type":"application/json"}},"uuid":"4a8a419e-4772-4077-95b5-281c2bd2851b","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"3760f9c4-fbaa-40e9-9770-35af4592adb1","name":"List prompts - default","request":{"urlPathTemplate":"/v0/evi/prompts","method":"GET"},"response":{"status":200,"body":"{\n \"page_number\": 0,\n \"page_size\": 2,\n \"total_pages\": 1,\n \"prompts_page\": [\n {\n \"id\": \"af699d45-2985-42cc-91b9-af9e5da3bac5\",\n \"version\": 0,\n \"version_type\": \"FIXED\",\n \"version_description\": \"\",\n \"name\": \"Weather Assistant Prompt\",\n \"created_on\": 1715267200693,\n \"modified_on\": 1715267200693,\n \"text\": \"You are an AI weather assistant providing users with accurate and up-to-date weather information. Respond to user queries concisely and clearly. Use simple language and avoid technical jargon. Provide temperature, precipitation, wind conditions, and any weather alerts. Include helpful tips if severe weather is expected.\"\n },\n {\n \"id\": \"616b2b4c-a096-4445-9c23-64058b564fc2\",\n \"version\": 0,\n \"version_type\": \"FIXED\",\n \"version_description\": \"\",\n \"name\": \"Web Search Assistant Prompt\",\n \"created_on\": 1715267200693,\n \"modified_on\": 1715267200693,\n \"text\": \"You are an AI web search assistant designed to help users find accurate and relevant information on the web. Respond to user queries promptly, using the built-in web search tool to retrieve up-to-date results. Present information clearly and concisely, summarizing key points where necessary. Use simple language and avoid technical jargon. If needed, provide helpful tips for refining search queries to obtain better results.\"\n }\n ]\n}","headers":{"Content-Type":"application/json"}},"uuid":"3760f9c4-fbaa-40e9-9770-35af4592adb1","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}},"postServeActions":[]},{"id":"1fab19ce-cf01-4f16-b221-518ebe235e05","name":"Create prompt - default","request":{"urlPathTemplate":"/v0/evi/prompts","method":"POST"},"response":{"status":201,"body":"{\n \"id\": \"af699d45-2985-42cc-91b9-af9e5da3bac5\",\n \"version\": 0,\n \"version_type\": \"FIXED\",\n \"version_description\": null,\n \"name\": \"Weather Assistant Prompt\",\n \"created_on\": 1722633247488,\n \"modified_on\": 1722633247488,\n \"text\": \"You are an AI weather assistant providing users with accurate and up-to-date weather information. Respond to user queries concisely and clearly. Use simple language and avoid technical jargon. Provide temperature, precipitation, wind conditions, and any weather alerts. 
Include helpful tips if severe weather is expected.\"\n}","headers":{"Content-Type":"application/json"}},"uuid":"1fab19ce-cf01-4f16-b221-518ebe235e05","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"818fa71c-38b4-45da-92f3-167567201251","name":"List prompt versions - default","request":{"urlPathTemplate":"/v0/evi/prompts/{id}","method":"GET","pathParameters":{"id":{"equalTo":"af699d45-2985-42cc-91b9-af9e5da3bac5"}}},"response":{"status":200,"body":"{\n \"page_number\": 0,\n \"page_size\": 10,\n \"total_pages\": 1,\n \"prompts_page\": [\n {\n \"id\": \"af699d45-2985-42cc-91b9-af9e5da3bac5\",\n \"version\": 0,\n \"version_type\": \"FIXED\",\n \"version_description\": \"\",\n \"name\": \"Weather Assistant Prompt\",\n \"created_on\": 1722633247488,\n \"modified_on\": 1722633247488,\n \"text\": \"You are an AI weather assistant providing users with accurate and up-to-date weather information. Respond to user queries concisely and clearly. Use simple language and avoid technical jargon. Provide temperature, precipitation, wind conditions, and any weather alerts. Include helpful tips if severe weather is expected.\"\n }\n ]\n}","headers":{"Content-Type":"application/json"}},"uuid":"818fa71c-38b4-45da-92f3-167567201251","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"b2978df3-51e4-4d99-b160-6287c20dda6f","name":"Create prompt version - default","request":{"urlPathTemplate":"/v0/evi/prompts/{id}","method":"POST","pathParameters":{"id":{"equalTo":"af699d45-2985-42cc-91b9-af9e5da3bac5"}}},"response":{"status":201,"body":"{\n \"id\": \"af699d45-2985-42cc-91b9-af9e5da3bac5\",\n \"version\": 1,\n \"version_type\": \"FIXED\",\n \"version_description\": \"This is an updated version of the Weather Assistant Prompt.\",\n \"name\": \"Weather Assistant Prompt\",\n \"created_on\": 1722633247488,\n \"modified_on\": 1722635140150,\n \"text\": \"You are an updated version of an AI weather assistant providing users with accurate and up-to-date weather information. Respond to user queries concisely and clearly. Use simple language and avoid technical jargon. Provide temperature, precipitation, wind conditions, and any weather alerts. 
Include helpful tips if severe weather is expected.\"\n}","headers":{"Content-Type":"application/json"}},"uuid":"b2978df3-51e4-4d99-b160-6287c20dda6f","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"722d1802-e538-4d83-9720-79ff982e0011","name":"Delete prompt - default","request":{"urlPathTemplate":"/v0/evi/prompts/{id}","method":"DELETE","pathParameters":{"id":{"equalTo":"af699d45-2985-42cc-91b9-af9e5da3bac5"}}},"response":{"status":200,"body":"\"\"","headers":{"Content-Type":"application/json"}},"uuid":"722d1802-e538-4d83-9720-79ff982e0011","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"6e0c3636-6d00-4e26-92d7-40460ef14c6c","name":"Update prompt name - default","request":{"urlPathTemplate":"/v0/evi/prompts/{id}","method":"PATCH","pathParameters":{"id":{"equalTo":"af699d45-2985-42cc-91b9-af9e5da3bac5"}}},"response":{"status":200,"body":"\"\"","headers":{"Content-Type":"application/json"}},"uuid":"6e0c3636-6d00-4e26-92d7-40460ef14c6c","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"3812aa90-4c73-4d5b-a38b-d5ca8efde008","name":"Get prompt version - default","request":{"urlPathTemplate":"/v0/evi/prompts/{id}/version/{version}","method":"GET","pathParameters":{"id":{"equalTo":"af699d45-2985-42cc-91b9-af9e5da3bac5"},"version":{"equalTo":"0"}}},"response":{"status":200,"body":"{\n \"id\": \"af699d45-2985-42cc-91b9-af9e5da3bac5\",\n \"version\": 0,\n \"version_type\": \"FIXED\",\n \"version_description\": \"\",\n \"name\": \"Weather Assistant Prompt\",\n \"created_on\": 1722633247488,\n \"modified_on\": 1722633247488,\n \"text\": \"You are an AI weather assistant providing users with accurate and up-to-date weather information. Respond to user queries concisely and clearly. Use simple language and avoid technical jargon. Provide temperature, precipitation, wind conditions, and any weather alerts. Include helpful tips if severe weather is expected.\"\n}","headers":{"Content-Type":"application/json"}},"uuid":"3812aa90-4c73-4d5b-a38b-d5ca8efde008","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"27f84b39-bfde-4b0f-a49e-fbd93767a180","name":"Delete prompt version - default","request":{"urlPathTemplate":"/v0/evi/prompts/{id}/version/{version}","method":"DELETE","pathParameters":{"id":{"equalTo":"af699d45-2985-42cc-91b9-af9e5da3bac5"},"version":{"equalTo":"1"}}},"response":{"status":200,"body":"\"\"","headers":{"Content-Type":"application/json"}},"uuid":"27f84b39-bfde-4b0f-a49e-fbd93767a180","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"760d3795-9a2c-4a38-940a-6b7459dff285","name":"Update prompt description - default","request":{"urlPathTemplate":"/v0/evi/prompts/{id}/version/{version}","method":"PATCH","pathParameters":{"id":{"equalTo":"af699d45-2985-42cc-91b9-af9e5da3bac5"},"version":{"equalTo":"1"}}},"response":{"status":200,"body":"{\n \"id\": \"af699d45-2985-42cc-91b9-af9e5da3bac5\",\n \"version\": 1,\n \"version_type\": \"FIXED\",\n \"version_description\": \"This is an updated version_description.\",\n \"name\": \"string\",\n \"created_on\": 1722633247488,\n \"modified_on\": 1722634770585,\n \"text\": \"You are an AI weather assistant providing users with accurate and up-to-date weather information. 
Respond to user queries concisely and clearly. Use simple language and avoid technical jargon. Provide temperature, precipitation, wind conditions, and any weather alerts. Include helpful tips if severe weather is expected.\"\n}","headers":{"Content-Type":"application/json"}},"uuid":"760d3795-9a2c-4a38-940a-6b7459dff285","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"76070823-253f-4b18-9c68-a80f6d2373ee","name":"List tools - default","request":{"urlPathTemplate":"/v0/evi/tools","method":"GET"},"response":{"status":200,"body":"{\n \"page_number\": 0,\n \"page_size\": 2,\n \"total_pages\": 1,\n \"tools_page\": [\n {\n \"tool_type\": \"FUNCTION\",\n \"id\": \"d20827af-5d8d-4f66-b6b9-ce2e3e1ea2b2\",\n \"version\": 0,\n \"version_type\": \"FIXED\",\n \"version_description\": \"Fetches user's current location.\",\n \"name\": \"get_current_location\",\n \"created_on\": 1715267200693,\n \"modified_on\": 1715267200693,\n \"fallback_content\": \"Unable to fetch location.\",\n \"description\": \"Fetches user's current location.\",\n \"parameters\": \"{ \\\"type\\\": \\\"object\\\", \\\"properties\\\": { \\\"location\\\": { \\\"type\\\": \\\"string\\\", \\\"description\\\": \\\"The city and state, e.g. San Francisco, CA\\\" }}, \\\"required\\\": [\\\"location\\\"] }\"\n },\n {\n \"tool_type\": \"FUNCTION\",\n \"id\": \"4442f3ea-9038-40e3-a2ce-1522b7de770f\",\n \"version\": 0,\n \"version_type\": \"FIXED\",\n \"version_description\": \"Fetches current weather and uses celsius or fahrenheit based on location of user.\",\n \"name\": \"get_current_weather\",\n \"created_on\": 1715266126705,\n \"modified_on\": 1715266126705,\n \"fallback_content\": \"Unable to fetch location.\",\n \"description\": \"Fetches current weather and uses celsius or fahrenheit based on location of user.\",\n \"parameters\": \"{ \\\"type\\\": \\\"object\\\", \\\"properties\\\": { \\\"location\\\": { \\\"type\\\": \\\"string\\\", \\\"description\\\": \\\"The city and state, e.g. San Francisco, CA\\\" }, \\\"format\\\": { \\\"type\\\": \\\"string\\\", \\\"enum\\\": [\\\"celsius\\\", \\\"fahrenheit\\\"], \\\"description\\\": \\\"The temperature unit to use. Infer this from the users location.\\\" } }, \\\"required\\\": [\\\"location\\\", \\\"format\\\"] }\"\n }\n ]\n}","headers":{"Content-Type":"application/json"}},"uuid":"76070823-253f-4b18-9c68-a80f6d2373ee","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}},"postServeActions":[]},{"id":"7dc71f1b-3d70-4527-a21f-326f7db77dec","name":"Create tool - default","request":{"urlPathTemplate":"/v0/evi/tools","method":"POST"},"response":{"status":201,"body":"{\n \"tool_type\": \"FUNCTION\",\n \"id\": \"aa9b71c4-723c-47ff-9f83-1a1829e74376\",\n \"version\": 0,\n \"version_type\": \"FIXED\",\n \"version_description\": \"Fetches current weather and uses celsius or fahrenheit based on location of user.\",\n \"name\": \"get_current_weather\",\n \"created_on\": 1715275452390,\n \"modified_on\": 1715275452390,\n \"fallback_content\": \"Unable to fetch current weather.\",\n \"description\": \"This tool is for getting the current weather.\",\n \"parameters\": \"{ \\\"type\\\": \\\"object\\\", \\\"properties\\\": { \\\"location\\\": { \\\"type\\\": \\\"string\\\", \\\"description\\\": \\\"The city and state, e.g. 
San Francisco, CA\\\" }, \\\"format\\\": { \\\"type\\\": \\\"string\\\", \\\"enum\\\": [\\\"celsius\\\", \\\"fahrenheit\\\"], \\\"description\\\": \\\"The temperature unit to use. Infer this from the users location.\\\" } }, \\\"required\\\": [\\\"location\\\", \\\"format\\\"] }\"\n}","headers":{"Content-Type":"application/json"}},"uuid":"7dc71f1b-3d70-4527-a21f-326f7db77dec","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"97f54c38-2fe1-4344-9355-88765ad39e92","name":"List tool versions - default","request":{"urlPathTemplate":"/v0/evi/tools/{id}","method":"GET","pathParameters":{"id":{"equalTo":"00183a3f-79ba-413d-9f3b-609864268bea"}}},"response":{"status":200,"body":"{\n \"page_number\": 0,\n \"page_size\": 10,\n \"total_pages\": 1,\n \"tools_page\": [\n {\n \"tool_type\": \"FUNCTION\",\n \"id\": \"00183a3f-79ba-413d-9f3b-609864268bea\",\n \"version\": 1,\n \"version_type\": \"FIXED\",\n \"version_description\": \"Fetches current weather and uses celsius, fahrenheit, or kelvin based on location of user.\",\n \"name\": \"get_current_weather\",\n \"created_on\": 1715277014228,\n \"modified_on\": 1715277602313,\n \"fallback_content\": \"Unable to fetch current weather.\",\n \"description\": \"This tool is for getting the current weather.\",\n \"parameters\": \"{ \\\"type\\\": \\\"object\\\", \\\"properties\\\": { \\\"location\\\": { \\\"type\\\": \\\"string\\\", \\\"description\\\": \\\"The city and state, e.g. San Francisco, CA\\\" }, \\\"format\\\": { \\\"type\\\": \\\"string\\\", \\\"enum\\\": [\\\"celsius\\\", \\\"fahrenheit\\\", \\\"kelvin\\\"], \\\"description\\\": \\\"The temperature unit to use. Infer this from the users location.\\\" } }, \\\"required\\\": [\\\"location\\\", \\\"format\\\"] }\"\n }\n ]\n}","headers":{"Content-Type":"application/json"}},"uuid":"97f54c38-2fe1-4344-9355-88765ad39e92","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"1230a7c7-82ee-4940-98d3-3f0e1acc2cba","name":"Create tool version - default","request":{"urlPathTemplate":"/v0/evi/tools/{id}","method":"POST","pathParameters":{"id":{"equalTo":"00183a3f-79ba-413d-9f3b-609864268bea"}}},"response":{"status":201,"body":"{\n \"tool_type\": \"FUNCTION\",\n \"id\": \"00183a3f-79ba-413d-9f3b-609864268bea\",\n \"version\": 1,\n \"version_type\": \"FIXED\",\n \"version_description\": \"Fetches current weather and uses celsius, fahrenheit, or kelvin based on location of user.\",\n \"name\": \"get_current_weather\",\n \"created_on\": 1715277014228,\n \"modified_on\": 1715277602313,\n \"fallback_content\": \"Unable to fetch current weather.\",\n \"description\": \"This tool is for getting the current weather.\",\n \"parameters\": \"{ \\\"type\\\": \\\"object\\\", \\\"properties\\\": { \\\"location\\\": { \\\"type\\\": \\\"string\\\", \\\"description\\\": \\\"The city and state, e.g. San Francisco, CA\\\" }, \\\"format\\\": { \\\"type\\\": \\\"string\\\", \\\"enum\\\": [\\\"celsius\\\", \\\"fahrenheit\\\", \\\"kelvin\\\"], \\\"description\\\": \\\"The temperature unit to use. 
Infer this from the users location.\\\" } }, \\\"required\\\": [\\\"location\\\", \\\"format\\\"] }\"\n}","headers":{"Content-Type":"application/json"}},"uuid":"1230a7c7-82ee-4940-98d3-3f0e1acc2cba","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"6e58cf98-dbd8-465b-a6cc-53941c38f006","name":"Delete tool - default","request":{"urlPathTemplate":"/v0/evi/tools/{id}","method":"DELETE","pathParameters":{"id":{"equalTo":"00183a3f-79ba-413d-9f3b-609864268bea"}}},"response":{"status":200,"body":"\"\"","headers":{"Content-Type":"application/json"}},"uuid":"6e58cf98-dbd8-465b-a6cc-53941c38f006","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"7bb920f0-9b75-4374-bdfb-540a599f3fce","name":"Update tool name - default","request":{"urlPathTemplate":"/v0/evi/tools/{id}","method":"PATCH","pathParameters":{"id":{"equalTo":"00183a3f-79ba-413d-9f3b-609864268bea"}}},"response":{"status":200,"body":"\"\"","headers":{"Content-Type":"application/json"}},"uuid":"7bb920f0-9b75-4374-bdfb-540a599f3fce","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"f4fe78ec-6b74-402d-8aef-0cbd04c6a473","name":"Get tool version - default","request":{"urlPathTemplate":"/v0/evi/tools/{id}/version/{version}","method":"GET","pathParameters":{"id":{"equalTo":"00183a3f-79ba-413d-9f3b-609864268bea"},"version":{"equalTo":"1"}}},"response":{"status":200,"body":"{\n \"tool_type\": \"FUNCTION\",\n \"id\": \"00183a3f-79ba-413d-9f3b-609864268bea\",\n \"version\": 1,\n \"version_type\": \"FIXED\",\n \"version_description\": \"Fetches current weather and uses celsius, fahrenheit, or kelvin based on location of user.\",\n \"name\": \"string\",\n \"created_on\": 1715277014228,\n \"modified_on\": 1715277602313,\n \"fallback_content\": \"Unable to fetch current weather.\",\n \"description\": \"This tool is for getting the current weather.\",\n \"parameters\": \"{ \\\"type\\\": \\\"object\\\", \\\"properties\\\": { \\\"location\\\": { \\\"type\\\": \\\"string\\\", \\\"description\\\": \\\"The city and state, e.g. San Francisco, CA\\\" }, \\\"format\\\": { \\\"type\\\": \\\"string\\\", \\\"enum\\\": [\\\"celsius\\\", \\\"fahrenheit\\\", \\\"kelvin\\\"], \\\"description\\\": \\\"The temperature unit to use. 
Infer this from the users location.\\\" } }, \\\"required\\\": [\\\"location\\\", \\\"format\\\"] }\"\n}","headers":{"Content-Type":"application/json"}},"uuid":"f4fe78ec-6b74-402d-8aef-0cbd04c6a473","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"74ef73ce-49e2-492e-a4cb-ea6a1dc9b948","name":"Delete tool version - default","request":{"urlPathTemplate":"/v0/evi/tools/{id}/version/{version}","method":"DELETE","pathParameters":{"id":{"equalTo":"00183a3f-79ba-413d-9f3b-609864268bea"},"version":{"equalTo":"1"}}},"response":{"status":200,"body":"\"\"","headers":{"Content-Type":"application/json"}},"uuid":"74ef73ce-49e2-492e-a4cb-ea6a1dc9b948","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"82d8c062-5280-4378-afad-8057ebea037a","name":"Update tool description - default","request":{"urlPathTemplate":"/v0/evi/tools/{id}/version/{version}","method":"PATCH","pathParameters":{"id":{"equalTo":"00183a3f-79ba-413d-9f3b-609864268bea"},"version":{"equalTo":"1"}}},"response":{"status":200,"body":"{\n \"tool_type\": \"FUNCTION\",\n \"id\": \"00183a3f-79ba-413d-9f3b-609864268bea\",\n \"version\": 1,\n \"version_type\": \"FIXED\",\n \"version_description\": \"Fetches current temperature, precipitation, wind speed, AQI, and other weather conditions. Uses Celsius, Fahrenheit, or kelvin depending on user's region.\",\n \"name\": \"string\",\n \"created_on\": 1715277014228,\n \"modified_on\": 1715277602313,\n \"fallback_content\": \"Unable to fetch current weather.\",\n \"description\": \"This tool is for getting the current weather.\",\n \"parameters\": \"{ \\\"type\\\": \\\"object\\\", \\\"properties\\\": { \\\"location\\\": { \\\"type\\\": \\\"string\\\", \\\"description\\\": \\\"The city and state, e.g. San Francisco, CA\\\" }, \\\"format\\\": { \\\"type\\\": \\\"string\\\", \\\"enum\\\": [\\\"celsius\\\", \\\"fahrenheit\\\", \\\"kelvin\\\"], \\\"description\\\": \\\"The temperature unit to use. 
Infer this from the users location.\\\" } }, \\\"required\\\": [\\\"location\\\", \\\"format\\\"] }\"\n}","headers":{"Content-Type":"application/json"}},"uuid":"82d8c062-5280-4378-afad-8057ebea037a","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"ac2d855e-1240-4aa0-88e2-61efb0c0026a","name":"List jobs - default","request":{"urlPathTemplate":"/v0/batch/jobs","method":"GET"},"response":{"status":200,"body":"[\n {\n \"job_id\": \"job_id\",\n \"request\": {\n \"callback_url\": null,\n \"files\": [\n {\n \"filename\": \"filename\",\n \"md5sum\": \"md5sum\",\n \"content_type\": \"content_type\"\n }\n ],\n \"models\": {\n \"burst\": {},\n \"face\": {\n \"descriptions\": null,\n \"facs\": null,\n \"fps_pred\": 3,\n \"identify_faces\": false,\n \"min_face_size\": 60,\n \"prob_threshold\": 0.99,\n \"save_faces\": false\n },\n \"facemesh\": {},\n \"language\": {\n \"granularity\": \"word\",\n \"identify_speakers\": false,\n \"sentiment\": null,\n \"toxicity\": null\n },\n \"ner\": {\n \"identify_speakers\": false\n },\n \"prosody\": {\n \"granularity\": \"utterance\",\n \"identify_speakers\": false,\n \"window\": null\n }\n },\n \"notify\": true,\n \"text\": [],\n \"urls\": [\n \"https://hume-tutorials.s3.amazonaws.com/faces.zip\"\n ]\n },\n \"state\": {\n \"created_timestamp_ms\": 1712587158717,\n \"ended_timestamp_ms\": 1712587159274,\n \"num_errors\": 0,\n \"num_predictions\": 10,\n \"started_timestamp_ms\": 1712587158800,\n \"status\": \"COMPLETED\"\n },\n \"type\": \"INFERENCE\"\n }\n]","headers":{"Content-Type":"application/json"}},"uuid":"ac2d855e-1240-4aa0-88e2-61efb0c0026a","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}},"postServeActions":[]},{"id":"53d86d3c-c150-433f-ab12-ef8e8e9210eb","name":"Start inference job - default","request":{"urlPathTemplate":"/v0/batch/jobs","method":"POST"},"response":{"status":200,"body":"{\n \"job_id\": \"job_id\"\n}","headers":{"Content-Type":"application/json"}},"uuid":"53d86d3c-c150-433f-ab12-ef8e8e9210eb","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"b5838b5a-8fe8-4d6b-9439-5e77b6a8b017","name":"Get job details - default","request":{"urlPathTemplate":"/v0/batch/jobs/{id}","method":"GET","pathParameters":{"id":{"equalTo":"job_id"}}},"response":{"status":200,"body":"{\n \"type\": \"INFERENCE\",\n \"job_id\": \"job_id\",\n \"request\": {\n \"callback_url\": null,\n \"files\": [],\n \"models\": {\n \"burst\": {},\n \"face\": {\n \"descriptions\": null,\n \"facs\": null,\n \"fps_pred\": 3,\n \"identify_faces\": false,\n \"min_face_size\": 60,\n \"prob_threshold\": 0.99,\n \"save_faces\": false\n },\n \"facemesh\": {},\n \"language\": {\n \"granularity\": \"word\",\n \"identify_speakers\": false,\n \"sentiment\": null,\n \"toxicity\": null\n },\n \"ner\": {\n \"identify_speakers\": false\n },\n \"prosody\": {\n \"granularity\": \"utterance\",\n \"identify_speakers\": false,\n \"window\": null\n }\n },\n \"notify\": true,\n \"text\": [],\n \"urls\": [\n \"https://hume-tutorials.s3.amazonaws.com/faces.zip\"\n ]\n },\n \"state\": {\n \"created_timestamp_ms\": 1712590457884,\n \"ended_timestamp_ms\": 1712590462252,\n \"num_errors\": 0,\n \"num_predictions\": 10,\n \"started_timestamp_ms\": 1712590457995,\n \"status\": \"COMPLETED\"\n 
}\n}","headers":{"Content-Type":"application/json"}},"uuid":"b5838b5a-8fe8-4d6b-9439-5e77b6a8b017","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"27496704-1278-4ec0-804b-5b06a9ddad44","name":"Get job predictions - default","request":{"urlPathTemplate":"/v0/batch/jobs/{id}/predictions","method":"GET","pathParameters":{"id":{"equalTo":"job_id"}}},"response":{"status":200,"body":"[\n {\n \"source\": {\n \"type\": \"url\",\n \"url\": \"https://hume-tutorials.s3.amazonaws.com/faces.zip\"\n },\n \"results\": {\n \"predictions\": [\n {\n \"file\": \"faces/100.jpg\",\n \"models\": {\n \"face\": {\n \"metadata\": null,\n \"grouped_predictions\": [\n {\n \"id\": \"unknown\",\n \"predictions\": [\n {\n \"frame\": 0,\n \"time\": 0,\n \"prob\": 0.9994111061096191,\n \"box\": {\n \"x\": 1187.885986328125,\n \"y\": 1397.697509765625,\n \"w\": 1401.668701171875,\n \"h\": 1961.424560546875\n },\n \"emotions\": [\n {\n \"name\": \"Admiration\",\n \"score\": 0.10722749680280685\n },\n {\n \"name\": \"Adoration\",\n \"score\": 0.06395940482616425\n },\n {\n \"name\": \"Aesthetic Appreciation\",\n \"score\": 0.05811462551355362\n },\n {\n \"name\": \"Amusement\",\n \"score\": 0.14187128841876984\n },\n {\n \"name\": \"Anger\",\n \"score\": 0.02804684266448021\n },\n {\n \"name\": \"Anxiety\",\n \"score\": 0.2713485360145569\n },\n {\n \"name\": \"Awe\",\n \"score\": 0.33812594413757324\n },\n {\n \"name\": \"Awkwardness\",\n \"score\": 0.1745193600654602\n },\n {\n \"name\": \"Boredom\",\n \"score\": 0.23600080609321594\n },\n {\n \"name\": \"Calmness\",\n \"score\": 0.18988418579101562\n },\n {\n \"name\": \"Concentration\",\n \"score\": 0.44288986921310425\n },\n {\n \"name\": \"Confusion\",\n \"score\": 0.39346569776535034\n },\n {\n \"name\": \"Contemplation\",\n \"score\": 0.31002455949783325\n },\n {\n \"name\": \"Contempt\",\n \"score\": 0.048870109021663666\n },\n {\n \"name\": \"Contentment\",\n \"score\": 0.0579497292637825\n },\n {\n \"name\": \"Craving\",\n \"score\": 0.06544201076030731\n },\n {\n \"name\": \"Desire\",\n \"score\": 0.05526508390903473\n },\n {\n \"name\": \"Determination\",\n \"score\": 0.08590991795063019\n },\n {\n \"name\": \"Disappointment\",\n \"score\": 0.19508258998394012\n },\n {\n \"name\": \"Disgust\",\n \"score\": 0.031529419124126434\n },\n {\n \"name\": \"Distress\",\n \"score\": 0.23210826516151428\n },\n {\n \"name\": \"Doubt\",\n \"score\": 0.3284550905227661\n },\n {\n \"name\": \"Ecstasy\",\n \"score\": 0.040716782212257385\n },\n {\n \"name\": \"Embarrassment\",\n \"score\": 0.1467227339744568\n },\n {\n \"name\": \"Empathic Pain\",\n \"score\": 0.07633581757545471\n },\n {\n \"name\": \"Entrancement\",\n \"score\": 0.16245244443416595\n },\n {\n \"name\": \"Envy\",\n \"score\": 0.03267110139131546\n },\n {\n \"name\": \"Excitement\",\n \"score\": 0.10656816512346268\n },\n {\n \"name\": \"Fear\",\n \"score\": 0.3115977346897125\n },\n {\n \"name\": \"Guilt\",\n \"score\": 0.11615975946187973\n },\n {\n \"name\": \"Horror\",\n \"score\": 0.19795553386211395\n },\n {\n \"name\": \"Interest\",\n \"score\": 0.3136432468891144\n },\n {\n \"name\": \"Joy\",\n \"score\": 0.06285581737756729\n },\n {\n \"name\": \"Love\",\n \"score\": 0.06339752674102783\n },\n {\n \"name\": \"Nostalgia\",\n \"score\": 0.05866732448339462\n },\n {\n \"name\": \"Pain\",\n \"score\": 0.07684041559696198\n },\n {\n \"name\": \"Pride\",\n \"score\": 0.026822954416275024\n },\n {\n \"name\": \"Realization\",\n 
\"score\": 0.30000734329223633\n },\n {\n \"name\": \"Relief\",\n \"score\": 0.04414166510105133\n },\n {\n \"name\": \"Romance\",\n \"score\": 0.042728863656520844\n },\n {\n \"name\": \"Sadness\",\n \"score\": 0.14773206412792206\n },\n {\n \"name\": \"Satisfaction\",\n \"score\": 0.05902980640530586\n },\n {\n \"name\": \"Shame\",\n \"score\": 0.08103451132774353\n },\n {\n \"name\": \"Surprise (negative)\",\n \"score\": 0.25518184900283813\n },\n {\n \"name\": \"Surprise (positive)\",\n \"score\": 0.28845661878585815\n },\n {\n \"name\": \"Sympathy\",\n \"score\": 0.062488824129104614\n },\n {\n \"name\": \"Tiredness\",\n \"score\": 0.1559651643037796\n },\n {\n \"name\": \"Triumph\",\n \"score\": 0.01955239288508892\n }\n ],\n \"facs\": null,\n \"descriptions\": null\n }\n ]\n }\n ]\n }\n }\n }\n ],\n \"errors\": []\n }\n }\n]","headers":{"Content-Type":"application/json"}},"uuid":"27496704-1278-4ec0-804b-5b06a9ddad44","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"12d3146e-fa86-4658-bfca-755d6e939757","name":"Start inference job from local file - default","request":{"urlPathTemplate":"/v0/batch/jobs","method":"POST"},"response":{"status":200,"body":"{\n \"job_id\": \"job_id\"\n}","headers":{"Content-Type":"application/json"}},"uuid":"12d3146e-fa86-4658-bfca-755d6e939757","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}}],"meta":{"total":48}} \ No newline at end of file