4 changes: 4 additions & 0 deletions .fernignore
@@ -1,3 +1,7 @@
# Specify files that shouldn't be modified by Fern

README.md
tests/unit/types/OpenAiMessageRole.test.ts
tests/unit/types/OpenAiModelReasoningEffort.test.ts
tests/unit/types/OpenAiModel.test.ts
tests/custom.test.ts
16 changes: 16 additions & 0 deletions src/api/types/OpenAiMessageRole.ts
@@ -1,10 +1,26 @@
// This file was auto-generated by Fern from our API Definition.

/**
 * Roles for OpenAI messages.
 *
 * Note: The "function" role is deprecated in favor of "tool". Use "tool" for new implementations.
 * The "developer" role is required for GPT-5.x and o-series models.
 */
export const OpenAiMessageRole = {
    Assistant: "assistant",
    /**
     * @deprecated The "function" role is deprecated in favor of "tool". Use "tool" for new implementations.
     * @see https://platform.openai.com/docs/guides/function-calling
     */
    Function: "function",
    User: "user",
    System: "system",
    Tool: "tool",
    /**
     * The "developer" role is used for system-level instructions in GPT-5.x and o-series models.
     * It provides a way to set high-level instructions that take precedence over user messages.
     * @see https://platform.openai.com/docs/guides/text-generation
     */
    Developer: "developer",
} as const;
export type OpenAiMessageRole = (typeof OpenAiMessageRole)[keyof typeof OpenAiMessageRole];
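
As a usage sketch for the new role: the snippet below builds a message list that routes system-level instructions through the "developer" role. The ChatMessage shape is a hypothetical stand-in for illustration; the SDK's actual message type is outside this diff.

import { OpenAiMessageRole } from "./src/api/types/OpenAiMessageRole.js";

// Hypothetical message shape for illustration only; not part of this diff.
interface ChatMessage {
    role: OpenAiMessageRole;
    content: string;
}

// For GPT-5.x and o-series models, system-level instructions go in a
// "developer" message, which takes precedence over user messages.
const messages: ChatMessage[] = [
    { role: OpenAiMessageRole.Developer, content: "Answer concisely and cite sources." },
    { role: OpenAiMessageRole.User, content: "Summarize yesterday's deployment." },
];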
69 changes: 69 additions & 0 deletions src/api/types/OpenAiModel.ts
@@ -80,4 +80,73 @@ export interface OpenAiModel {
     * @default 0
     */
    numFastTurns?: number;
    /**
     * If specified, the system will make a best effort to sample deterministically,
     * such that repeated requests with the same seed and parameters should return the same result.
     * Determinism is not guaranteed.
     *
     * @see https://platform.openai.com/docs/api-reference/chat/create#chat-create-seed
     */
    seed?: number;
    /**
     * An alternative to sampling with temperature, called nucleus sampling,
     * where the model considers the results of the tokens with top_p probability mass.
     * So 0.1 means only the tokens comprising the top 10% probability mass are considered.
     *
     * We generally recommend altering this or temperature but not both.
     *
     * @default 1
     * @see https://platform.openai.com/docs/api-reference/chat/create#chat-create-top_p
     */
    topP?: number;
    /**
     * Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing
     * frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.
     *
     * @default 0
     * @see https://platform.openai.com/docs/api-reference/chat/create#chat-create-frequency_penalty
     */
    frequencyPenalty?: number;
    /**
     * Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they
     * appear in the text so far, increasing the model's likelihood to talk about new topics.
     *
     * @default 0
     * @see https://platform.openai.com/docs/api-reference/chat/create#chat-create-presence_penalty
     */
    presencePenalty?: number;
    /**
     * Whether to return log probabilities of the output tokens or not.
     * If true, returns the log probabilities of each output token in the content of the message.
     *
     * @default false
     * @see https://platform.openai.com/docs/api-reference/chat/create#chat-create-logprobs
     */
    logprobs?: boolean;
    /**
     * An integer between 0 and 20 specifying the number of most likely tokens to return at each
     * token position, each with an associated log probability. logprobs must be set to true if
     * this parameter is used.
     *
     * @see https://platform.openai.com/docs/api-reference/chat/create#chat-create-top_logprobs
     */
    topLogprobs?: number;
    /**
     * Whether to enable parallel function calling during tool use.
     * When set to true, the model can call multiple functions in a single response.
     *
     * @default true
     * @see https://platform.openai.com/docs/guides/function-calling#parallel-function-calling
     */
    parallelToolCalls?: boolean;
    /**
     * Constrains effort on reasoning for reasoning models (o1, o3, etc.).
     * Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response.
     *
     * Possible values: "low", "medium", "high"
     *
     * @default "medium"
     * @see https://platform.openai.com/docs/api-reference/chat/create#chat-create-reasoning_effort
     */
    reasoningEffort?: Vapi.OpenAiModelReasoningEffort;
}
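
For orientation, here is a minimal sketch of how the new fields compose on a single config. It uses Partial<OpenAiModel> to sidestep whichever fields the full interface requires, since those sit outside this diff; the values are illustrative, not recommendations.

import type { OpenAiModel } from "./src/api/types/OpenAiModel.js";

// Illustrative values only.
const config: Partial<OpenAiModel> = {
    seed: 42,                 // best-effort determinism across repeated requests
    topP: 0.9,                // nucleus sampling; tune this or temperature, not both
    frequencyPenalty: 0.5,    // discourage verbatim repetition
    presencePenalty: 0.3,     // nudge the model toward new topics
    logprobs: true,           // must be true for topLogprobs to apply
    topLogprobs: 5,           // top 5 candidate tokens per position (0-20)
    parallelToolCalls: false, // force at most one tool call per response
};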
15 changes: 15 additions & 0 deletions src/api/types/OpenAiModelReasoningEffort.ts
@@ -0,0 +1,15 @@
// This file was auto-generated by Fern from our API Definition.

/**
 * Constrains effort on reasoning for reasoning models (o1, o3, etc.).
 * Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response.
 *
 * @default "medium"
 * @see https://platform.openai.com/docs/api-reference/chat/create#chat-create-reasoning_effort
 */
export const OpenAiModelReasoningEffort = {
    Low: "low",
    Medium: "medium",
    High: "high",
} as const;
export type OpenAiModelReasoningEffort = (typeof OpenAiModelReasoningEffort)[keyof typeof OpenAiModelReasoningEffort];
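
A note on the pattern used here: the as-const object plus the derived type means the enum accepts both the named constant and the raw string literal, as this sketch shows.

import { OpenAiModelReasoningEffort } from "./src/api/types/OpenAiModelReasoningEffort.js";

// Both forms resolve to the union type "low" | "medium" | "high".
const viaConstant: OpenAiModelReasoningEffort = OpenAiModelReasoningEffort.High;
const viaLiteral: OpenAiModelReasoningEffort = "low";

// Invalid literals are rejected at compile time:
// const invalid: OpenAiModelReasoningEffort = "maximum"; // type error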
1 change: 1 addition & 0 deletions src/api/types/index.ts
@@ -1095,6 +1095,7 @@ export * from "./OpenAiModelFallbackModelsItem.js";
export * from "./OpenAiModelModel.js";
export * from "./OpenAiModelPromptCacheRetention.js";
export * from "./OpenAiModelProvider.js";
export * from "./OpenAiModelReasoningEffort.js";
export * from "./OpenAiModelToolStrictCompatibilityMode.js";
export * from "./OpenAiModelToolsItem.js";
export * from "./OpenAiTranscriber.js";
73 changes: 73 additions & 0 deletions tests/unit/types/OpenAiMessageRole.test.ts
@@ -0,0 +1,73 @@
/**
 * Tests for OpenAiMessageRole enum
 * VAP-11729: Update server-sdk-typescript SDK with OpenAI spec updates
 */
import { describe, it, expect } from "vitest";
import { OpenAiMessageRole } from "../../../src/api/types/OpenAiMessageRole.js";

describe("OpenAiMessageRole", () => {
    describe("enum values", () => {
        it("should have 'assistant' role", () => {
            expect(OpenAiMessageRole.Assistant).toBe("assistant");
        });

        it("should have 'function' role (deprecated)", () => {
            expect(OpenAiMessageRole.Function).toBe("function");
        });

        it("should have 'user' role", () => {
            expect(OpenAiMessageRole.User).toBe("user");
        });

        it("should have 'system' role", () => {
            expect(OpenAiMessageRole.System).toBe("system");
        });

        it("should have 'tool' role", () => {
            expect(OpenAiMessageRole.Tool).toBe("tool");
        });

        it("should have 'developer' role for GPT-5.x and o-series models", () => {
            expect(OpenAiMessageRole.Developer).toBe("developer");
        });
    });

    describe("type safety", () => {
        it("should accept valid role values", () => {
            const roles: OpenAiMessageRole[] = [
                OpenAiMessageRole.Assistant,
                OpenAiMessageRole.Function,
                OpenAiMessageRole.User,
                OpenAiMessageRole.System,
                OpenAiMessageRole.Tool,
                OpenAiMessageRole.Developer,
            ];

            expect(roles).toHaveLength(6);
            expect(roles).toContain("assistant");
            expect(roles).toContain("function");
            expect(roles).toContain("user");
            expect(roles).toContain("system");
            expect(roles).toContain("tool");
            expect(roles).toContain("developer");
        });

        it("should have all expected roles defined", () => {
            const expectedRoles = ["assistant", "function", "user", "system", "tool", "developer"];
            const actualRoles = Object.values(OpenAiMessageRole);

            expect(actualRoles.sort()).toEqual(expectedRoles.sort());
        });
    });

    describe("backward compatibility", () => {
        it("should maintain existing role string values", () => {
            // Ensure existing roles haven't changed their string values
            expect(OpenAiMessageRole.Assistant).toBe("assistant");
            expect(OpenAiMessageRole.Function).toBe("function");
            expect(OpenAiMessageRole.User).toBe("user");
            expect(OpenAiMessageRole.System).toBe("system");
            expect(OpenAiMessageRole.Tool).toBe("tool");
        });
    });
});
172 changes: 172 additions & 0 deletions tests/unit/types/OpenAiModel.test.ts
@@ -0,0 +1,172 @@
/**
 * Tests for OpenAiModel interface
 * VAP-11729: Update server-sdk-typescript SDK with OpenAI spec updates
 */
import { describe, it, expect } from "vitest";
import type { OpenAiModel } from "../../../src/api/types/OpenAiModel.js";
import { OpenAiModelReasoningEffort } from "../../../src/api/types/OpenAiModelReasoningEffort.js";

describe("OpenAiModel", () => {
    describe("new optional parameters", () => {
        it("should allow creating a model config with seed parameter", () => {
            const model: Partial<OpenAiModel> = {
                seed: 42,
            };

            expect(model.seed).toBe(42);
        });

        it("should allow creating a model config with topP parameter", () => {
            const model: Partial<OpenAiModel> = {
                topP: 0.9,
            };

            expect(model.topP).toBe(0.9);
        });

        it("should allow creating a model config with frequencyPenalty parameter", () => {
            const model: Partial<OpenAiModel> = {
                frequencyPenalty: 0.5,
            };

            expect(model.frequencyPenalty).toBe(0.5);
        });

        it("should allow creating a model config with presencePenalty parameter", () => {
            const model: Partial<OpenAiModel> = {
                presencePenalty: 0.3,
            };

            expect(model.presencePenalty).toBe(0.3);
        });

        it("should allow creating a model config with logprobs parameter", () => {
            const model: Partial<OpenAiModel> = {
                logprobs: true,
            };

            expect(model.logprobs).toBe(true);
        });

        it("should allow creating a model config with topLogprobs parameter", () => {
            const model: Partial<OpenAiModel> = {
                logprobs: true,
                topLogprobs: 5,
            };

            expect(model.topLogprobs).toBe(5);
        });

        it("should allow creating a model config with parallelToolCalls parameter", () => {
            const model: Partial<OpenAiModel> = {
                parallelToolCalls: false,
            };

            expect(model.parallelToolCalls).toBe(false);
        });

        it("should allow creating a model config with reasoningEffort parameter", () => {
            const model: Partial<OpenAiModel> = {
                reasoningEffort: OpenAiModelReasoningEffort.High,
            };

            expect(model.reasoningEffort).toBe("high");
        });
    });

    describe("parameter validation (conceptual)", () => {
        it("should accept valid topP values (0 to 1)", () => {
            const validValues = [0, 0.1, 0.5, 0.9, 1];
            validValues.forEach((value) => {
                const model: Partial<OpenAiModel> = { topP: value };
                expect(model.topP).toBe(value);
            });
        });

        it("should accept valid frequency_penalty values (-2.0 to 2.0)", () => {
            const validValues = [-2.0, -1.0, 0, 1.0, 2.0];
            validValues.forEach((value) => {
                const model: Partial<OpenAiModel> = { frequencyPenalty: value };
                expect(model.frequencyPenalty).toBe(value);
            });
        });

        it("should accept valid presence_penalty values (-2.0 to 2.0)", () => {
            const validValues = [-2.0, -1.0, 0, 1.0, 2.0];
            validValues.forEach((value) => {
                const model: Partial<OpenAiModel> = { presencePenalty: value };
                expect(model.presencePenalty).toBe(value);
            });
        });

        it("should accept valid topLogprobs values (0 to 20)", () => {
            const validValues = [0, 5, 10, 15, 20];
            validValues.forEach((value) => {
                const model: Partial<OpenAiModel> = { topLogprobs: value };
                expect(model.topLogprobs).toBe(value);
            });
        });
    });

    describe("backward compatibility", () => {
        it("should maintain existing temperature parameter", () => {
            const model: Partial<OpenAiModel> = {
                temperature: 0.7,
            };

            expect(model.temperature).toBe(0.7);
        });

        it("should maintain existing maxTokens parameter", () => {
            const model: Partial<OpenAiModel> = {
                maxTokens: 1000,
            };

            expect(model.maxTokens).toBe(1000);
        });

        it("should allow combining old and new parameters", () => {
            const model: Partial<OpenAiModel> = {
                temperature: 0.7,
                maxTokens: 1000,
                topP: 0.9,
                frequencyPenalty: 0.5,
                presencePenalty: 0.3,
                seed: 42,
                logprobs: true,
                topLogprobs: 5,
                parallelToolCalls: true,
                reasoningEffort: OpenAiModelReasoningEffort.Medium,
            };

            expect(model.temperature).toBe(0.7);
            expect(model.maxTokens).toBe(1000);
            expect(model.topP).toBe(0.9);
            expect(model.frequencyPenalty).toBe(0.5);
            expect(model.presencePenalty).toBe(0.3);
            expect(model.seed).toBe(42);
            expect(model.logprobs).toBe(true);
            expect(model.topLogprobs).toBe(5);
            expect(model.parallelToolCalls).toBe(true);
            expect(model.reasoningEffort).toBe("medium");
        });
    });

    describe("reasoning models (o1, o3 series)", () => {
        it("should support reasoningEffort for o-series models", () => {
            const lowEffort: Partial<OpenAiModel> = {
                reasoningEffort: OpenAiModelReasoningEffort.Low,
            };
            const mediumEffort: Partial<OpenAiModel> = {
                reasoningEffort: OpenAiModelReasoningEffort.Medium,
            };
            const highEffort: Partial<OpenAiModel> = {
                reasoningEffort: OpenAiModelReasoningEffort.High,
            };

            expect(lowEffort.reasoningEffort).toBe("low");
            expect(mediumEffort.reasoningEffort).toBe("medium");
            expect(highEffort.reasoningEffort).toBe("high");
        });
    });
});