From a63c0b6808870fd7df0a634951cf2350dc090554 Mon Sep 17 00:00:00 2001
From: Seb Ringrose
Date: Fri, 17 Apr 2026 16:08:04 +0100
Subject: [PATCH 1/3] docs: add Doubleword chat client example

---
 .../chat_client/doubleword_chat_client.py | 55 +++++++++++++++++++
 1 file changed, 55 insertions(+)
 create mode 100644 python/samples/02-agents/chat_client/doubleword_chat_client.py

diff --git a/python/samples/02-agents/chat_client/doubleword_chat_client.py b/python/samples/02-agents/chat_client/doubleword_chat_client.py
new file mode 100644
index 0000000000..60fd00cd85
--- /dev/null
+++ b/python/samples/02-agents/chat_client/doubleword_chat_client.py
@@ -0,0 +1,55 @@
+# Copyright (c) Microsoft. All rights reserved.
+
+"""
+Doubleword Chat Client Example
+
+This sample demonstrates how to use the Microsoft Agent Framework with
+Doubleword's OpenAI-compatible inference API. Doubleword is an AI model
+gateway providing unified routing, management, and security for inference
+across multiple model providers.
+
+Since Doubleword exposes an OpenAI-compatible API, you can use the built-in
+OpenAIChatCompletionClient with a custom base URL.
+
+Setup:
+    pip install agent-framework-openai
+    export DOUBLEWORD_API_KEY="your-api-key"
+
+Available models: https://docs.doubleword.ai/inference-api/models
+
+For batch pricing (up to 90% savings with the Doubleword Inference API),
+see https://pypi.org/project/autobatcher/
+"""
+
+import asyncio
+import os
+
+from agent_framework import Message
+from agent_framework.openai import OpenAIChatCompletionClient
+
+
+async def main() -> None:
+    """Run a basic prompt using Doubleword's inference API."""
+    client = OpenAIChatCompletionClient(
+        model="Qwen/Qwen3.5-397B-A17B-FP8",
+        base_url="https://api.doubleword.ai/v1",
+        api_key=os.environ["DOUBLEWORD_API_KEY"],
+    )
+
+    message = Message("user", contents=["Explain the benefits of an AI model gateway in one paragraph."])
+    print(f"User: {message.text}")
+
+    response = await client.get_response([message], stream=False)
+    print(f"Assistant: {response}")
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
+
+
+"""
+Sample output:
+User: Explain the benefits of an AI model gateway in one paragraph.
+Assistant: An AI model gateway provides a unified API layer that routes inference
+requests across multiple model providers, enabling organizations to ...
+"""

From 6793ba61ecd49eafe444d2f21272abd8b85eeda9 Mon Sep 17 00:00:00 2001
From: Seb
Date: Fri, 17 Apr 2026 16:55:00 +0100
Subject: [PATCH 2/3] docs: add autobatcher example with BatchOpenAI async client

Shows how to use autobatcher's BatchOpenAI as the async_client parameter
for OpenAIChatCompletionClient, enabling batch pricing.
Co-Authored-By: Claude Opus 4.6 (1M context)
---
 .../chat_client/doubleword_chat_client.py | 26 +++++++++++++++++++++++---
 1 file changed, 23 insertions(+), 3 deletions(-)

diff --git a/python/samples/02-agents/chat_client/doubleword_chat_client.py b/python/samples/02-agents/chat_client/doubleword_chat_client.py
index 60fd00cd85..18c3fc8d88 100644
--- a/python/samples/02-agents/chat_client/doubleword_chat_client.py
+++ b/python/samples/02-agents/chat_client/doubleword_chat_client.py
@@ -16,9 +16,6 @@
     export DOUBLEWORD_API_KEY="your-api-key"
 
 Available models: https://docs.doubleword.ai/inference-api/models
-
-For batch pricing (up to 90% savings with the Doubleword Inference API),
-see https://pypi.org/project/autobatcher/
 """
 
 import asyncio
@@ -43,6 +40,29 @@ async def main() -> None:
     print(f"Assistant: {response}")
 
 
+async def main_batch() -> None:
+    """Run batch requests at reduced cost using autobatcher.
+
+    Install: pip install autobatcher
+    See: https://pypi.org/project/autobatcher/
+    """
+    from autobatcher import BatchOpenAI
+
+    client = OpenAIChatCompletionClient(
+        model="Qwen/Qwen3.5-397B-A17B-FP8",
+        async_client=BatchOpenAI(
+            api_key=os.environ["DOUBLEWORD_API_KEY"],
+            base_url="https://api.doubleword.ai/v1",
+        ),
+    )
+
+    message = Message("user", contents=["Explain the benefits of an AI model gateway in one paragraph."])
+    print(f"User: {message.text}")
+
+    response = await client.get_response([message], stream=False)
+    print(f"Assistant: {response}")
+
+
 if __name__ == "__main__":
     asyncio.run(main())
 

From 5efe91d51736b436e5ed898ca4f0b584cfcecdb6 Mon Sep 17 00:00:00 2001
From: Seb Ringrose
Date: Mon, 27 Apr 2026 17:05:30 +0100
Subject: [PATCH 3/3] docs(doubleword): add main_async() sample for 1h flex tier

Mirror the existing main_batch() block with main_async(), using
autobatcher.AsyncOpenAI (1h flex tier) instead of BatchOpenAI (24h).
Updates the docstring header to list all three execution modes.
---
 .../chat_client/doubleword_chat_client.py | 33 +++++++++++++++++++
 1 file changed, 33 insertions(+)

diff --git a/python/samples/02-agents/chat_client/doubleword_chat_client.py b/python/samples/02-agents/chat_client/doubleword_chat_client.py
index 18c3fc8d88..c1fbf1703d 100644
--- a/python/samples/02-agents/chat_client/doubleword_chat_client.py
+++ b/python/samples/02-agents/chat_client/doubleword_chat_client.py
@@ -11,6 +11,11 @@
 Since Doubleword exposes an OpenAI-compatible API, you can use the built-in
 OpenAIChatCompletionClient with a custom base URL.
 
+Three execution modes are demonstrated:
+- main() — realtime (priority tier)
+- main_async() — 1-hour async (flex tier, mid-tier cost)
+- main_batch() — 24-hour batch (deepest discount)
+
 Setup:
     pip install agent-framework-openai
     export DOUBLEWORD_API_KEY="your-api-key"
@@ -40,9 +45,37 @@ async def main() -> None:
     print(f"Assistant: {response}")
 
 
+async def main_async() -> None:
+    """Run requests on the 1-hour async (flex) tier using autobatcher.
+
+    Mid-tier cost between realtime and 24-hour batch — use when next-day
+    batch turnaround is too slow but realtime is too expensive.
+
+    Install: pip install autobatcher
+    See: https://pypi.org/project/autobatcher/
+    """
+    from autobatcher import AsyncOpenAI
+
+    client = OpenAIChatCompletionClient(
+        model="Qwen/Qwen3.5-397B-A17B-FP8",
+        async_client=AsyncOpenAI(
+            api_key=os.environ["DOUBLEWORD_API_KEY"],
+            base_url="https://api.doubleword.ai/v1",
+        ),
+    )
+
+    message = Message("user", contents=["Explain the benefits of an AI model gateway in one paragraph."])
+    print(f"User: {message.text}")
+
+    response = await client.get_response([message], stream=False)
+    print(f"Assistant: {response}")
+
+
 async def main_batch() -> None:
     """Run batch requests at reduced cost using autobatcher.
 
+    24-hour batch tier — deepest discount (up to ~90% off realtime).
+
     Install: pip install autobatcher
     See: https://pypi.org/project/autobatcher/
     """