|
8 | 8 |
|
9 | 9 | import httpx |
10 | 10 |
|
11 | | -from . import resources, _exceptions |
| 11 | +from . import _exceptions |
12 | 12 | from ._qs import Querystring |
13 | 13 | from ._types import ( |
14 | 14 | NOT_GIVEN, |
|
25 | 25 | get_async_library, |
26 | 26 | ) |
27 | 27 | from ._version import __version__ |
| 28 | +from .resources import files, images, models, batches, embeddings, completions, moderations |
28 | 29 | from ._streaming import Stream as Stream, AsyncStream as AsyncStream |
29 | 30 | from ._exceptions import OpenAIError, APIStatusError |
30 | 31 | from ._base_client import ( |
31 | 32 | DEFAULT_MAX_RETRIES, |
32 | 33 | SyncAPIClient, |
33 | 34 | AsyncAPIClient, |
34 | 35 | ) |
| 36 | +from .resources.beta import beta |
| 37 | +from .resources.chat import chat |
| 38 | +from .resources.audio import audio |
| 39 | +from .resources.uploads import uploads |
| 40 | +from .resources.fine_tuning import fine_tuning |
35 | 41 |
|
# Public re-exports of this module. ``resources`` is intentionally absent:
# resource classes are reached through the client instances instead.
__all__ = [
    "Timeout",
    "Transport",
    "ProxiesTypes",
    "RequestOptions",
    "OpenAI",
    "AsyncOpenAI",
    "Client",
    "AsyncClient",
]
47 | 43 |
|
48 | 44 |
|
49 | 45 | class OpenAI(SyncAPIClient): |
50 | | - completions: resources.Completions |
51 | | - chat: resources.Chat |
52 | | - embeddings: resources.Embeddings |
53 | | - files: resources.Files |
54 | | - images: resources.Images |
55 | | - audio: resources.Audio |
56 | | - moderations: resources.Moderations |
57 | | - models: resources.Models |
58 | | - fine_tuning: resources.FineTuning |
59 | | - beta: resources.Beta |
60 | | - batches: resources.Batches |
61 | | - uploads: resources.Uploads |
| 46 | + completions: completions.Completions |
| 47 | + chat: chat.Chat |
| 48 | + embeddings: embeddings.Embeddings |
| 49 | + files: files.Files |
| 50 | + images: images.Images |
| 51 | + audio: audio.Audio |
| 52 | + moderations: moderations.Moderations |
| 53 | + models: models.Models |
| 54 | + fine_tuning: fine_tuning.FineTuning |
| 55 | + beta: beta.Beta |
| 56 | + batches: batches.Batches |
| 57 | + uploads: uploads.Uploads |
62 | 58 | with_raw_response: OpenAIWithRawResponse |
63 | 59 | with_streaming_response: OpenAIWithStreamedResponse |
64 | 60 |
|
@@ -133,18 +129,18 @@ def __init__( |
133 | 129 |
|
134 | 130 | self._default_stream_cls = Stream |
135 | 131 |
|
136 | | - self.completions = resources.Completions(self) |
137 | | - self.chat = resources.Chat(self) |
138 | | - self.embeddings = resources.Embeddings(self) |
139 | | - self.files = resources.Files(self) |
140 | | - self.images = resources.Images(self) |
141 | | - self.audio = resources.Audio(self) |
142 | | - self.moderations = resources.Moderations(self) |
143 | | - self.models = resources.Models(self) |
144 | | - self.fine_tuning = resources.FineTuning(self) |
145 | | - self.beta = resources.Beta(self) |
146 | | - self.batches = resources.Batches(self) |
147 | | - self.uploads = resources.Uploads(self) |
| 132 | + self.completions = completions.Completions(self) |
| 133 | + self.chat = chat.Chat(self) |
| 134 | + self.embeddings = embeddings.Embeddings(self) |
| 135 | + self.files = files.Files(self) |
| 136 | + self.images = images.Images(self) |
| 137 | + self.audio = audio.Audio(self) |
| 138 | + self.moderations = moderations.Moderations(self) |
| 139 | + self.models = models.Models(self) |
| 140 | + self.fine_tuning = fine_tuning.FineTuning(self) |
| 141 | + self.beta = beta.Beta(self) |
| 142 | + self.batches = batches.Batches(self) |
| 143 | + self.uploads = uploads.Uploads(self) |
148 | 144 | self.with_raw_response = OpenAIWithRawResponse(self) |
149 | 145 | self.with_streaming_response = OpenAIWithStreamedResponse(self) |
150 | 146 |
|
@@ -261,18 +257,18 @@ def _make_status_error( |
261 | 257 |
|
262 | 258 |
|
263 | 259 | class AsyncOpenAI(AsyncAPIClient): |
264 | | - completions: resources.AsyncCompletions |
265 | | - chat: resources.AsyncChat |
266 | | - embeddings: resources.AsyncEmbeddings |
267 | | - files: resources.AsyncFiles |
268 | | - images: resources.AsyncImages |
269 | | - audio: resources.AsyncAudio |
270 | | - moderations: resources.AsyncModerations |
271 | | - models: resources.AsyncModels |
272 | | - fine_tuning: resources.AsyncFineTuning |
273 | | - beta: resources.AsyncBeta |
274 | | - batches: resources.AsyncBatches |
275 | | - uploads: resources.AsyncUploads |
| 260 | + completions: completions.AsyncCompletions |
| 261 | + chat: chat.AsyncChat |
| 262 | + embeddings: embeddings.AsyncEmbeddings |
| 263 | + files: files.AsyncFiles |
| 264 | + images: images.AsyncImages |
| 265 | + audio: audio.AsyncAudio |
| 266 | + moderations: moderations.AsyncModerations |
| 267 | + models: models.AsyncModels |
| 268 | + fine_tuning: fine_tuning.AsyncFineTuning |
| 269 | + beta: beta.AsyncBeta |
| 270 | + batches: batches.AsyncBatches |
| 271 | + uploads: uploads.AsyncUploads |
276 | 272 | with_raw_response: AsyncOpenAIWithRawResponse |
277 | 273 | with_streaming_response: AsyncOpenAIWithStreamedResponse |
278 | 274 |
|
@@ -347,18 +343,18 @@ def __init__( |
347 | 343 |
|
348 | 344 | self._default_stream_cls = AsyncStream |
349 | 345 |
|
350 | | - self.completions = resources.AsyncCompletions(self) |
351 | | - self.chat = resources.AsyncChat(self) |
352 | | - self.embeddings = resources.AsyncEmbeddings(self) |
353 | | - self.files = resources.AsyncFiles(self) |
354 | | - self.images = resources.AsyncImages(self) |
355 | | - self.audio = resources.AsyncAudio(self) |
356 | | - self.moderations = resources.AsyncModerations(self) |
357 | | - self.models = resources.AsyncModels(self) |
358 | | - self.fine_tuning = resources.AsyncFineTuning(self) |
359 | | - self.beta = resources.AsyncBeta(self) |
360 | | - self.batches = resources.AsyncBatches(self) |
361 | | - self.uploads = resources.AsyncUploads(self) |
| 346 | + self.completions = completions.AsyncCompletions(self) |
| 347 | + self.chat = chat.AsyncChat(self) |
| 348 | + self.embeddings = embeddings.AsyncEmbeddings(self) |
| 349 | + self.files = files.AsyncFiles(self) |
| 350 | + self.images = images.AsyncImages(self) |
| 351 | + self.audio = audio.AsyncAudio(self) |
| 352 | + self.moderations = moderations.AsyncModerations(self) |
| 353 | + self.models = models.AsyncModels(self) |
| 354 | + self.fine_tuning = fine_tuning.AsyncFineTuning(self) |
| 355 | + self.beta = beta.AsyncBeta(self) |
| 356 | + self.batches = batches.AsyncBatches(self) |
| 357 | + self.uploads = uploads.AsyncUploads(self) |
362 | 358 | self.with_raw_response = AsyncOpenAIWithRawResponse(self) |
363 | 359 | self.with_streaming_response = AsyncOpenAIWithStreamedResponse(self) |
364 | 360 |
|
@@ -476,66 +472,66 @@ def _make_status_error( |
476 | 472 |
|
class OpenAIWithRawResponse:
    """Mirror of ``OpenAI`` whose resources return raw HTTP responses.

    Wraps each resource namespace of *client* in its ``*WithRawResponse``
    counterpart so callers can access response headers/status alongside the
    parsed body.
    """

    def __init__(self, client: OpenAI) -> None:
        # One raw-response wrapper per top-level resource on the sync client.
        self.completions = completions.CompletionsWithRawResponse(client.completions)
        self.chat = chat.ChatWithRawResponse(client.chat)
        self.embeddings = embeddings.EmbeddingsWithRawResponse(client.embeddings)
        self.files = files.FilesWithRawResponse(client.files)
        self.images = images.ImagesWithRawResponse(client.images)
        self.audio = audio.AudioWithRawResponse(client.audio)
        self.moderations = moderations.ModerationsWithRawResponse(client.moderations)
        self.models = models.ModelsWithRawResponse(client.models)
        self.fine_tuning = fine_tuning.FineTuningWithRawResponse(client.fine_tuning)
        self.beta = beta.BetaWithRawResponse(client.beta)
        self.batches = batches.BatchesWithRawResponse(client.batches)
        self.uploads = uploads.UploadsWithRawResponse(client.uploads)
491 | 487 |
|
492 | 488 |
|
class AsyncOpenAIWithRawResponse:
    """Mirror of ``AsyncOpenAI`` whose resources return raw HTTP responses.

    Wraps each resource namespace of *client* in its ``Async*WithRawResponse``
    counterpart so callers can access response headers/status alongside the
    parsed body.
    """

    def __init__(self, client: AsyncOpenAI) -> None:
        # One raw-response wrapper per top-level resource on the async client.
        self.completions = completions.AsyncCompletionsWithRawResponse(client.completions)
        self.chat = chat.AsyncChatWithRawResponse(client.chat)
        self.embeddings = embeddings.AsyncEmbeddingsWithRawResponse(client.embeddings)
        self.files = files.AsyncFilesWithRawResponse(client.files)
        self.images = images.AsyncImagesWithRawResponse(client.images)
        self.audio = audio.AsyncAudioWithRawResponse(client.audio)
        self.moderations = moderations.AsyncModerationsWithRawResponse(client.moderations)
        self.models = models.AsyncModelsWithRawResponse(client.models)
        self.fine_tuning = fine_tuning.AsyncFineTuningWithRawResponse(client.fine_tuning)
        self.beta = beta.AsyncBetaWithRawResponse(client.beta)
        self.batches = batches.AsyncBatchesWithRawResponse(client.batches)
        self.uploads = uploads.AsyncUploadsWithRawResponse(client.uploads)
507 | 503 |
|
508 | 504 |
|
class OpenAIWithStreamedResponse:
    """Mirror of ``OpenAI`` whose resources expose streaming response access.

    Wraps each resource namespace of *client* in its
    ``*WithStreamingResponse`` counterpart.
    """

    def __init__(self, client: OpenAI) -> None:
        # One streaming-response wrapper per top-level resource on the sync client.
        self.completions = completions.CompletionsWithStreamingResponse(client.completions)
        self.chat = chat.ChatWithStreamingResponse(client.chat)
        self.embeddings = embeddings.EmbeddingsWithStreamingResponse(client.embeddings)
        self.files = files.FilesWithStreamingResponse(client.files)
        self.images = images.ImagesWithStreamingResponse(client.images)
        self.audio = audio.AudioWithStreamingResponse(client.audio)
        self.moderations = moderations.ModerationsWithStreamingResponse(client.moderations)
        self.models = models.ModelsWithStreamingResponse(client.models)
        self.fine_tuning = fine_tuning.FineTuningWithStreamingResponse(client.fine_tuning)
        self.beta = beta.BetaWithStreamingResponse(client.beta)
        self.batches = batches.BatchesWithStreamingResponse(client.batches)
        self.uploads = uploads.UploadsWithStreamingResponse(client.uploads)
523 | 519 |
|
524 | 520 |
|
class AsyncOpenAIWithStreamedResponse:
    """Mirror of ``AsyncOpenAI`` whose resources expose streaming response access.

    Wraps each resource namespace of *client* in its
    ``Async*WithStreamingResponse`` counterpart.
    """

    def __init__(self, client: AsyncOpenAI) -> None:
        # One streaming-response wrapper per top-level resource on the async client.
        self.completions = completions.AsyncCompletionsWithStreamingResponse(client.completions)
        self.chat = chat.AsyncChatWithStreamingResponse(client.chat)
        self.embeddings = embeddings.AsyncEmbeddingsWithStreamingResponse(client.embeddings)
        self.files = files.AsyncFilesWithStreamingResponse(client.files)
        self.images = images.AsyncImagesWithStreamingResponse(client.images)
        self.audio = audio.AsyncAudioWithStreamingResponse(client.audio)
        self.moderations = moderations.AsyncModerationsWithStreamingResponse(client.moderations)
        self.models = models.AsyncModelsWithStreamingResponse(client.models)
        self.fine_tuning = fine_tuning.AsyncFineTuningWithStreamingResponse(client.fine_tuning)
        self.beta = beta.AsyncBetaWithStreamingResponse(client.beta)
        self.batches = batches.AsyncBatchesWithStreamingResponse(client.batches)
        self.uploads = uploads.AsyncUploadsWithStreamingResponse(client.uploads)
539 | 535 |
|
540 | 536 |
|
# ``Client`` is kept as an alias of ``OpenAI`` — presumably for backwards
# compatibility with older import styles; confirm before removing.
Client = OpenAI
|
0 commit comments