Skip to content

Commit beadef0

Browse files
committed
Add image url support to chat completion endpoint
1 parent 0fd6e41 commit beadef0

File tree

13 files changed

+245
-51
lines changed

13 files changed

+245
-51
lines changed

README.md

Lines changed: 61 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -24,6 +24,7 @@ More information: [set API key](#set-api-key), [add proxy](#add-proxy), [rate li
2424
- [Delete fine-tune model](#delete-fine-tune-model)
2525
- Chat
2626
- [Create chat completion](#create-chat-completion)
27+
- [Create chat completion with image](#create-chat-completion-with-image)
2728
- [Function calling](#function-calling)
2829
- Images
2930
- [Create image](#create-image)
@@ -135,6 +136,7 @@ Creates a model response for the given chat conversation.
135136
136137
```rust
137138
use openai_dive::v1::api::Client;
139+
use openai_dive::v1::models::Gpt4Engine;
138140
use openai_dive::v1::resources::chat::{ChatCompletionParameters, ChatMessage, ChatMessageContent, Role};
139141
use std::env;
140142

@@ -145,16 +147,16 @@ async fn main() {
145147
let client = Client::new(api_key);
146148

147149
let parameters = ChatCompletionParameters {
148-
model: "gpt-3.5-turbo-16k-0613".to_string(),
150+
model: Gpt4Engine::Gpt41106Preview.to_string(),
149151
messages: vec![
150152
ChatMessage {
151153
role: Role::User,
152-
content: Some("Hello!".to_string()),
154+
content: ChatMessageContent::Text("Hello!".to_string()),
153155
..Default::default()
154156
},
155157
ChatMessage {
156158
role: Role::User,
157-
content: Some("Where are you located?".to_string()),
159+
content: ChatMessageContent::Text("What is the capital of Vietnam?".to_string()),
158160
..Default::default()
159161
},
160162
],
@@ -170,6 +172,55 @@ async fn main() {
170172

171173
More information: [Create chat completion](https://platform.openai.com/docs/api-reference/chat/create)
172174

175+
### Create chat completion with image
176+
177+
Creates a model response for a chat conversation that includes an image URL as part of the message content (vision input).
178+
179+
```rust
180+
use openai_dive::v1::api::Client;
181+
use openai_dive::v1::models::Gpt4Engine;
182+
use openai_dive::v1::resources::chat::{ChatCompletionParameters, ChatMessage, ChatMessageContent, ImageUrl, ImageUrlType, Role};
183+
use std::env;
184+
185+
#[tokio::main]
186+
async fn main() {
187+
let api_key = env::var("OPENAI_API_KEY").expect("$OPENAI_API_KEY is not set");
188+
189+
let client = Client::new(api_key);
190+
191+
let parameters = ChatCompletionParameters {
192+
model: Gpt4Engine::Gpt4VisionPreview.to_string(),
193+
messages: vec![
194+
ChatMessage {
195+
role: Role::User,
196+
content: ChatMessageContent::Text("What is in this image?".to_string()),
197+
..Default::default()
198+
},
199+
ChatMessage {
200+
role: Role::User,
201+
content: ChatMessageContent::ImageUrl(vec![ImageUrl {
202+
r#type: "image_url".to_string(),
203+
text: None,
204+
image_url: ImageUrlType {
205+
url: "https://images.unsplash.com/photo-1526682847805-721837c3f83b?w=640".to_string(),
206+
detail: None,
207+
},
208+
}]),
209+
..Default::default()
210+
},
211+
],
212+
max_tokens: Some(50),
213+
..Default::default()
214+
};
215+
216+
let result = client.chat().create(parameters).await.unwrap();
217+
218+
println!("{:#?}", result);
219+
}
220+
```
221+
222+
More information: [Create chat completion](https://platform.openai.com/docs/api-reference/chat/create)
223+
173224
### Function calling
174225

175226
In an API call, you can describe functions and have the model intelligently choose to output a JSON object containing arguments to call one or many functions. The Chat Completions API does not call the function; instead, the model generates JSON that you can use to call the function in your code.
@@ -179,6 +230,7 @@ In an API call, you can describe functions and have the model intelligently choo
179230
180231
```rust
181232
use openai_dive::v1::api::Client;
233+
use openai_dive::v1::models::Gpt4Engine;
182234
use openai_dive::v1::resources::chat::{
183235
ChatCompletionFunction, ChatCompletionParameters, ChatCompletionTool, ChatCompletionToolChoice,
184236
ChatCompletionToolChoiceFunction, ChatCompletionToolChoiceFunctionName, ChatCompletionToolType,
@@ -196,12 +248,12 @@ async fn main() {
196248
let client = Client::new(api_key);
197249

198250
let mut messages = vec![ChatMessage {
199-
content: Some("Give me a random number between 25 and 50?".to_string()),
251+
content: ChatMessageContent::Text("Give me a random number between 25 and 50?".to_string()),
200252
..Default::default()
201253
}];
202254

203255
let parameters = ChatCompletionParameters {
204-
model: "gpt-3.5-turbo-0613".to_string(),
256+
model: Gpt4Engine::Gpt41106Preview.to_string(),
205257
messages: messages.clone(),
206258
tool_choice: Some(ChatCompletionToolChoice::ChatCompletionToolChoiceFunction(
207259
ChatCompletionToolChoiceFunction {
@@ -242,13 +294,15 @@ async fn main() {
242294

243295
messages.push(ChatMessage {
244296
role: Role::Function,
245-
content: Some(serde_json::to_string(&random_number_result).unwrap()),
297+
content: ChatMessageContent::Text(
298+
serde_json::to_string(&random_number_result).unwrap(),
299+
),
246300
name: Some("get_random_number".to_string()),
247301
..Default::default()
248302
});
249303

250304
let parameters = ChatCompletionParameters {
251-
model: "gpt-3.5-turbo-0613".to_string(),
305+
model: Gpt4Engine::Gpt41106Preview.to_string(),
252306
messages: messages.clone(),
253307
..Default::default()
254308
};

examples/Cargo.toml

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -6,6 +6,7 @@ members = [
66
"audio/create_speech",
77
"chat/create_chat_completion",
88
"chat/create_chat_completion_stream",
9+
"chat/create_image_chat_completion",
910
"chat/function_calling",
1011
"chat/function_calling_stream",
1112
"chat/rate_limit_headers",

examples/chat/Cargo.toml

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -3,6 +3,7 @@ resolver = "2"
33
members = [
44
"create_chat_completion",
55
"create_chat_completion_stream",
6+
"create_image_chat_completion",
67
"function_calling",
78
"function_calling_stream",
89
"rate_limit_headers",

examples/chat/create_chat_completion/src/main.rs

Lines changed: 5 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,6 @@
11
use openai_dive::v1::api::Client;
2-
use openai_dive::v1::resources::chat::{ChatCompletionParameters, ChatMessage, Role};
2+
use openai_dive::v1::models::Gpt4Engine;
3+
use openai_dive::v1::resources::chat::{ChatCompletionParameters, ChatMessage, ChatMessageContent, Role};
34
use std::env;
45

56
#[tokio::main]
@@ -9,16 +10,16 @@ async fn main() {
910
let client = Client::new(api_key);
1011

1112
let parameters = ChatCompletionParameters {
12-
model: "gpt-3.5-turbo-16k-0613".to_string(),
13+
model: Gpt4Engine::Gpt41106Preview.to_string(),
1314
messages: vec![
1415
ChatMessage {
1516
role: Role::User,
16-
content: Some("Hello!".to_string()),
17+
content: ChatMessageContent::Text("Hello!".to_string()),
1718
..Default::default()
1819
},
1920
ChatMessage {
2021
role: Role::User,
21-
content: Some("Where are you located?".to_string()),
22+
content: ChatMessageContent::Text("Where are you located?".to_string()),
2223
..Default::default()
2324
},
2425
],

examples/chat/create_chat_completion_stream/src/main.rs

Lines changed: 5 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,8 @@
11
use futures::StreamExt;
22
use openai_dive::v1::api::Client;
3-
use openai_dive::v1::resources::chat::{ChatCompletionParameters, ChatMessage, Role};
3+
use openai_dive::v1::resources::chat::{
4+
ChatCompletionParameters, ChatMessage, ChatMessageContent, Role,
5+
};
46
use std::env;
57

68
#[tokio::main]
@@ -14,12 +16,12 @@ async fn main() {
1416
messages: vec![
1517
ChatMessage {
1618
role: Role::User,
17-
content: Some("Hello!".to_string()),
19+
content: ChatMessageContent::Text("Hello!".to_string()),
1820
..Default::default()
1921
},
2022
ChatMessage {
2123
role: Role::User,
22-
content: Some("Where are you located?".to_string()),
24+
content: ChatMessageContent::Text("Where are you located?".to_string()),
2325
..Default::default()
2426
},
2527
],
Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,9 @@
1+
[package]
2+
name = "create_image_chat_completion"
3+
version = "0.1.0"
4+
edition = "2021"
5+
publish = false
6+
7+
[dependencies]
8+
openai_dive = { path = "./../../../../openai-client" }
9+
tokio = { version = "1.0", features = ["full"] }
Lines changed: 40 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,40 @@
1+
use openai_dive::v1::api::Client;
2+
use openai_dive::v1::models::Gpt4Engine;
3+
use openai_dive::v1::resources::chat::{ChatCompletionParameters, ChatMessage, ChatMessageContent, ImageUrl, ImageUrlType, Role};
4+
use std::env;
5+
6+
#[tokio::main]
7+
async fn main() {
8+
let api_key = env::var("OPENAI_API_KEY").expect("$OPENAI_API_KEY is not set");
9+
10+
let client = Client::new(api_key);
11+
12+
let parameters = ChatCompletionParameters {
13+
model: Gpt4Engine::Gpt4VisionPreview.to_string(),
14+
messages: vec![
15+
ChatMessage {
16+
role: Role::User,
17+
content: ChatMessageContent::Text("What is in this image?".to_string()),
18+
..Default::default()
19+
},
20+
ChatMessage {
21+
role: Role::User,
22+
content: ChatMessageContent::ImageUrl(vec![ImageUrl {
23+
r#type: "image_url".to_string(),
24+
text: None,
25+
image_url: ImageUrlType {
26+
url: "https://images.unsplash.com/photo-1526682847805-721837c3f83b?w=640".to_string(),
27+
detail: None,
28+
},
29+
}]),
30+
..Default::default()
31+
},
32+
],
33+
max_tokens: Some(50),
34+
..Default::default()
35+
};
36+
37+
let result = client.chat().create(parameters).await.unwrap();
38+
39+
println!("{:#?}", result);
40+
}

examples/chat/function_calling/src/main.rs

Lines changed: 12 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -1,8 +1,8 @@
11
use openai_dive::v1::api::Client;
2+
use openai_dive::v1::models::Gpt4Engine;
23
use openai_dive::v1::resources::chat::{
3-
ChatCompletionFunction, ChatCompletionParameters, ChatCompletionTool, ChatCompletionToolChoice,
4-
ChatCompletionToolChoiceFunction, ChatCompletionToolChoiceFunctionName, ChatCompletionToolType,
5-
ChatMessage, Role,
4+
ChatCompletionFunction, ChatCompletionParameters, ChatCompletionTool, ChatCompletionToolChoice, ChatCompletionToolChoiceFunction, ChatCompletionToolChoiceFunctionName, ChatCompletionToolType,
5+
ChatMessage, ChatMessageContent, Role,
66
};
77
use openai_dive::v1::resources::shared::FinishReason;
88
use rand::Rng;
@@ -16,21 +16,19 @@ async fn main() {
1616
let client = Client::new(api_key);
1717

1818
let mut messages = vec![ChatMessage {
19-
content: Some("Give me a random number between 25 and 50?".to_string()),
19+
content: ChatMessageContent::Text("Give me a random number between 25 and 50?".to_string()),
2020
..Default::default()
2121
}];
2222

2323
let parameters = ChatCompletionParameters {
24-
model: "gpt-3.5-turbo-0613".to_string(),
24+
model: Gpt4Engine::Gpt41106Preview.to_string(),
2525
messages: messages.clone(),
26-
tool_choice: Some(ChatCompletionToolChoice::ChatCompletionToolChoiceFunction(
27-
ChatCompletionToolChoiceFunction {
28-
r#type: Some(ChatCompletionToolType::Function),
29-
function: ChatCompletionToolChoiceFunctionName {
30-
name: "get_random_number".to_string(),
31-
},
26+
tool_choice: Some(ChatCompletionToolChoice::ChatCompletionToolChoiceFunction(ChatCompletionToolChoiceFunction {
27+
r#type: Some(ChatCompletionToolType::Function),
28+
function: ChatCompletionToolChoiceFunctionName {
29+
name: "get_random_number".to_string(),
3230
},
33-
)),
31+
})),
3432
tools: Some(vec![ChatCompletionTool {
3533
r#type: ChatCompletionToolType::Function,
3634
function: ChatCompletionFunction {
@@ -54,15 +52,14 @@ async fn main() {
5452
if choice.finish_reason == FinishReason::StopSequenceReached {
5553
if let Some(tool_calls) = &choice.message.tool_calls {
5654
for tool_call in tool_calls.iter() {
57-
let random_numbers =
58-
serde_json::from_str(&tool_call.function.arguments).unwrap();
55+
let random_numbers = serde_json::from_str(&tool_call.function.arguments).unwrap();
5956

6057
if tool_call.function.name == "get_random_number" {
6158
let random_number_result = get_random_number(random_numbers);
6259

6360
messages.push(ChatMessage {
6461
role: Role::Function,
65-
content: Some(serde_json::to_string(&random_number_result).unwrap()),
62+
content: ChatMessageContent::Text(serde_json::to_string(&random_number_result).unwrap()),
6663
name: Some("get_random_number".to_string()),
6764
..Default::default()
6865
});

examples/chat/function_calling_stream/src/main.rs

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -4,7 +4,7 @@ use openai_dive::v1::api::Client;
44
use openai_dive::v1::resources::chat::{
55
ChatCompletionFunction, ChatCompletionParameters, ChatCompletionTool, ChatCompletionToolChoice,
66
ChatCompletionToolChoiceFunction, ChatCompletionToolChoiceFunctionName, ChatCompletionToolType,
7-
ChatMessage, DeltaFunction, Role,
7+
ChatMessage, ChatMessageContent, DeltaFunction, Role,
88
};
99
use openai_dive::v1::resources::shared::FinishReason;
1010
use rand::Rng;
@@ -18,7 +18,7 @@ async fn main() {
1818
let client = Client::new(api_key);
1919

2020
let mut messages = vec![ChatMessage {
21-
content: Some("Give me a random number between 25 and 50?".to_string()),
21+
content: ChatMessageContent::Text("Give me a random number between 25 and 50?".to_string()),
2222
..Default::default()
2323
}];
2424

@@ -76,7 +76,7 @@ async fn main() {
7676

7777
messages.push(ChatMessage {
7878
role: Role::Function,
79-
content: Some(
79+
content: ChatMessageContent::Text(
8080
serde_json::to_string(&random_number_result).unwrap(),
8181
),
8282
name: Some("get_random_number".to_string()),

examples/chat/rate_limit_headers/src/main.rs

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
use openai_dive::v1::api::Client;
22
use openai_dive::v1::resources::chat::{
3-
ChatCompletionParameters, ChatCompletionResponse, ChatMessage, Role,
3+
ChatCompletionParameters, ChatCompletionResponse, ChatMessage, ChatMessageContent, Role,
44
};
55
use openai_dive::v1::resources::shared::ResponseWrapper;
66
use std::env;
@@ -16,12 +16,12 @@ async fn main() {
1616
messages: vec![
1717
ChatMessage {
1818
role: Role::User,
19-
content: Some("Hello!".to_string()),
19+
content: ChatMessageContent::Text("Hello!".to_string()),
2020
..Default::default()
2121
},
2222
ChatMessage {
2323
role: Role::User,
24-
content: Some("Where are you located?".to_string()),
24+
content: ChatMessageContent::Text("Where are you located?".to_string()),
2525
..Default::default()
2626
},
2727
],

0 commit comments

Comments
 (0)