Commit fbaf9c1

Update chat examples with new models

committed · 1 parent ed4e95e · commit fbaf9c1

File tree (6 files changed: +34, -19 lines)
  • examples/chat

examples/chat/create_chat_completion/src/main.rs
Lines changed: 4 additions & 2 deletions

@@ -1,6 +1,8 @@
 use openai_dive::v1::api::Client;
 use openai_dive::v1::models::Gpt4Engine;
-use openai_dive::v1::resources::chat::{ChatCompletionParameters, ChatMessage, ChatMessageContent, Role};
+use openai_dive::v1::resources::chat::{
+    ChatCompletionParameters, ChatMessage, ChatMessageContent, Role,
+};
 use std::env;
 
 #[tokio::main]
@@ -10,7 +12,7 @@ async fn main() {
     let client = Client::new(api_key);
 
     let parameters = ChatCompletionParameters {
-        model: Gpt4Engine::Gpt41106Preview.to_string(),
+        model: Gpt4Engine::Gpt40125Preview.to_string(),
         messages: vec![
             ChatMessage {
                 role: Role::User,

examples/chat/create_chat_completion_stream/src/main.rs
Lines changed: 4 additions & 2 deletions

@@ -1,7 +1,9 @@
 use futures::StreamExt;
 use openai_dive::v1::api::Client;
 use openai_dive::v1::models::Gpt4Engine;
-use openai_dive::v1::resources::chat::{ChatCompletionParameters, ChatMessage, ChatMessageContent, Role};
+use openai_dive::v1::resources::chat::{
+    ChatCompletionParameters, ChatMessage, ChatMessageContent, Role,
+};
 use std::env;
 
 #[tokio::main]
@@ -11,7 +13,7 @@ async fn main() {
     let client = Client::new(api_key);
 
     let parameters = ChatCompletionParameters {
-        model: Gpt4Engine::Gpt41106Preview.to_string(),
+        model: Gpt4Engine::Gpt40125Preview.to_string(),
         messages: vec![
             ChatMessage {
                 role: Role::User,

examples/chat/create_image_chat_completion/src/main.rs
Lines changed: 5 additions & 2 deletions

@@ -1,6 +1,8 @@
 use openai_dive::v1::api::Client;
 use openai_dive::v1::models::Gpt4Engine;
-use openai_dive::v1::resources::chat::{ChatCompletionParameters, ChatMessage, ChatMessageContent, ImageUrl, ImageUrlType, Role};
+use openai_dive::v1::resources::chat::{
+    ChatCompletionParameters, ChatMessage, ChatMessageContent, ImageUrl, ImageUrlType, Role,
+};
 use std::env;
 
 #[tokio::main]
@@ -23,7 +25,8 @@ async fn main() {
                     r#type: "image_url".to_string(),
                     text: None,
                     image_url: ImageUrlType {
-                        url: "https://images.unsplash.com/photo-1526682847805-721837c3f83b?w=640".to_string(),
+                        url: "https://images.unsplash.com/photo-1526682847805-721837c3f83b?w=640"
+                            .to_string(),
                         detail: None,
                     },
                 }]),

examples/chat/function_calling/src/main.rs
Lines changed: 6 additions & 4 deletions

@@ -1,8 +1,8 @@
 use openai_dive::v1::api::Client;
 use openai_dive::v1::models::Gpt4Engine;
 use openai_dive::v1::resources::chat::{
-    ChatCompletionFunction, ChatCompletionParameters, ChatCompletionTool, ChatCompletionToolType, ChatMessage,
-    ChatMessageContent,
+    ChatCompletionFunction, ChatCompletionParameters, ChatCompletionTool, ChatCompletionToolType,
+    ChatMessage, ChatMessageContent,
 };
 use rand::Rng;
 use serde::{Deserialize, Serialize};
@@ -15,12 +15,14 @@ async fn main() {
     let client = Client::new(api_key);
 
     let messages = vec![ChatMessage {
-        content: ChatMessageContent::Text("Give me a random number between 100 and no more than 150?".to_string()),
+        content: ChatMessageContent::Text(
+            "Give me a random number between 100 and no more than 150?".to_string(),
+        ),
         ..Default::default()
     }];
 
     let parameters = ChatCompletionParameters {
-        model: Gpt4Engine::Gpt41106Preview.to_string(),
+        model: Gpt4Engine::Gpt40125Preview.to_string(),
         messages: messages.clone(),
         tools: Some(vec![ChatCompletionTool {
             r#type: ChatCompletionToolType::Function,

examples/chat/function_calling_stream/src/main.rs
Lines changed: 8 additions & 5 deletions

@@ -2,8 +2,8 @@ use futures::StreamExt;
 use openai_dive::v1::api::Client;
 use openai_dive::v1::models::Gpt4Engine;
 use openai_dive::v1::resources::chat::{
-    ChatCompletionFunction, ChatCompletionParameters, ChatCompletionTool, ChatCompletionToolType, ChatMessage,
-    ChatMessageContent, DeltaFunction,
+    ChatCompletionFunction, ChatCompletionParameters, ChatCompletionTool, ChatCompletionToolType,
+    ChatMessage, ChatMessageContent, DeltaFunction,
 };
 use openai_dive::v1::resources::shared::FinishReason;
 use rand::Rng;
@@ -17,12 +17,14 @@ async fn main() {
     let client = Client::new(api_key);
 
     let messages = vec![ChatMessage {
-        content: ChatMessageContent::Text("Give me a random number higher than 100 but less than 2*150?".to_string()),
+        content: ChatMessageContent::Text(
+            "Give me a random number higher than 100 but less than 2*150?".to_string(),
+        ),
         ..Default::default()
     }];
 
     let parameters = ChatCompletionParameters {
-        model: Gpt4Engine::Gpt41106Preview.to_string(),
+        model: Gpt4Engine::Gpt40125Preview.to_string(),
         messages: messages.clone(),
         tools: Some(vec![ChatCompletionTool {
             r#type: ChatCompletionToolType::Function,
@@ -63,7 +65,8 @@ async fn main() {
                     let arguments = function.arguments.clone().unwrap();
 
                     if name == "get_random_number" {
-                        let random_numbers: RandomNumber = serde_json::from_str(&arguments).unwrap();
+                        let random_numbers: RandomNumber =
+                            serde_json::from_str(&arguments).unwrap();
 
                         println!("Min: {:?}", &random_numbers.min);
                         println!("Max: {:?}", &random_numbers.max);

examples/chat/rate_limit_headers/src/main.rs
Lines changed: 7 additions & 4 deletions

@@ -13,7 +13,7 @@ async fn main() {
     let client = Client::new(api_key);
 
     let parameters = ChatCompletionParameters {
-        model: Gpt4Engine::Gpt41106Preview.to_string(),
+        model: Gpt4Engine::Gpt40125Preview.to_string(),
         messages: vec![
             ChatMessage {
                 role: Role::User,
@@ -22,15 +22,18 @@ async fn main() {
             },
             ChatMessage {
                 role: Role::User,
-                content: ChatMessageContent::Text("Which country has the largest population?".to_string()),
+                content: ChatMessageContent::Text(
+                    "Which country has the largest population?".to_string(),
+                ),
                 ..Default::default()
             },
         ],
-        max_tokens: Some(12),
+        max_tokens: Some(50),
         ..Default::default()
     };
 
-    let result: ResponseWrapper<ChatCompletionResponse> = client.chat().create_wrapped(parameters).await.unwrap();
+    let result: ResponseWrapper<ChatCompletionResponse> =
+        client.chat().create_wrapped(parameters).await.unwrap();
 
     println!("{:#?}", result.headers);
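Taken together, the hunks above leave the non-streaming chat example looking roughly like the sketch below. The API-key handling and the final client.chat().create(...) call sit outside the diff context and are assumed here (the rate_limit_headers hunk only shows the wrapped variant, create_wrapped).

use openai_dive::v1::api::Client;
use openai_dive::v1::models::Gpt4Engine;
use openai_dive::v1::resources::chat::{
    ChatCompletionParameters, ChatMessage, ChatMessageContent, Role,
};
use std::env;

#[tokio::main]
async fn main() {
    // Assumed: the examples read the key from the environment; the exact
    // variable name is not visible in the hunks above.
    let api_key = env::var("OPENAI_API_KEY").expect("OPENAI_API_KEY is not set");

    let client = Client::new(api_key);

    let parameters = ChatCompletionParameters {
        // The commit's actual change: Gpt41106Preview -> Gpt40125Preview.
        model: Gpt4Engine::Gpt40125Preview.to_string(),
        messages: vec![ChatMessage {
            role: Role::User,
            content: ChatMessageContent::Text(
                "Which country has the largest population?".to_string(),
            ),
            ..Default::default()
        }],
        max_tokens: Some(50),
        ..Default::default()
    };

    // Assumed call; only `create_wrapped` appears in the diff context.
    let result = client.chat().create(parameters).await.unwrap();

    println!("{:#?}", result);
}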

0 commit comments
