diff --git a/examples/async_demo.py b/examples/async_demo.py
index 793b4e43fb..f27016a4e3 100755
--- a/examples/async_demo.py
+++ b/examples/async_demo.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env -S poetry run python
+#!/usr/bin/env -S rye run python
 
 import asyncio
 
@@ -9,13 +9,21 @@
 
 
 async def main() -> None:
-    stream = await client.completions.create(
-        model="gpt-3.5-turbo-instruct",
-        prompt="Say this is a test",
+    stream = await client.chat.completions.create(
+        model="gpt-4o",
+        messages=[
+            {
+                "role": "user",
+                "content": "Say this is a test",
+            },
+        ],
         stream=True,
     )
-    async for completion in stream:
-        print(completion.choices[0].text, end="")
+    async for chunk in stream:
+        if not chunk.choices:
+            continue
+
+        print(chunk.choices[0].delta.content or "", end="")
     print()
 
 
diff --git a/examples/demo.py b/examples/demo.py
index ac1710f3e0..8b8ab4ba20 100755
--- a/examples/demo.py
+++ b/examples/demo.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env -S poetry run python
+#!/usr/bin/env -S rye run python
 
 from openai import OpenAI
 
@@ -8,7 +8,7 @@
 # Non-streaming:
 print("----- standard request -----")
 completion = client.chat.completions.create(
-    model="gpt-4",
+    model="gpt-4o",
     messages=[
         {
             "role": "user",
@@ -21,7 +21,7 @@
 # Streaming:
 print("----- streaming request -----")
 stream = client.chat.completions.create(
-    model="gpt-4",
+    model="gpt-4o",
     messages=[
         {
             "role": "user",
@@ -40,7 +40,7 @@
 # Response headers:
 print("----- custom response headers test -----")
 response = client.chat.completions.with_raw_response.create(
-    model="gpt-4",
+    model="gpt-4o",
     messages=[
         {
             "role": "user",
diff --git a/examples/streaming.py b/examples/streaming.py
index 9a84891a83..1a4731c12f 100755
--- a/examples/streaming.py
+++ b/examples/streaming.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env -S poetry run python
+#!/usr/bin/env -S rye run python
 
 import asyncio
 
@@ -12,11 +12,14 @@
 
 
 def sync_main() -> None:
     client = OpenAI()
-    response = client.completions.create(
-        model="gpt-3.5-turbo-instruct",
-        prompt="1,2,3,",
-        max_tokens=5,
-        temperature=0,
+    response = client.chat.completions.create(
+        model="gpt-4o",
+        messages=[
+            {
+                "role": "user",
+                "content": "Count from 1 to 5.",
+            },
+        ],
         stream=True,
     )
@@ -32,11 +35,14 @@ def sync_main() -> None:
 
 
 async def async_main() -> None:
     client = AsyncOpenAI()
-    response = await client.completions.create(
-        model="gpt-3.5-turbo-instruct",
-        prompt="1,2,3,",
-        max_tokens=5,
-        temperature=0,
+    response = await client.chat.completions.create(
+        model="gpt-4o",
+        messages=[
+            {
+                "role": "user",
+                "content": "Count from 1 to 5.",
+            },
+        ],
         stream=True,
     )
diff --git a/examples/video.py b/examples/video.py
index ee89e64697..af307e19ee 100644
--- a/examples/video.py
+++ b/examples/video.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env -S poetry run python
+#!/usr/bin/env -S rye run python
 
 import asyncio
 