From 76950264c8f711113e2ed7c5bcbc022bfb04b1e8 Mon Sep 17 00:00:00 2001
From: Vapi Tasker
Date: Sat, 31 Jan 2026 07:36:22 +0000
Subject: [PATCH] feat: add OpenAI spec updates for GPT-5.x and o-series models

VAP-11729

- Add 'developer' role to OpenAiMessageRole enum for GPT-5.x and o-series models
- Add deprecation notice for 'function' role in favor of 'tool'
- Add new optional API parameters to OpenAiModel interface:
  - seed: for deterministic sampling
  - topP: nucleus sampling parameter
  - frequencyPenalty: penalize repeated tokens
  - presencePenalty: encourage new topics
  - logprobs: return token log probabilities
  - topLogprobs: number of top log probabilities to return
  - parallelToolCalls: enable parallel function calling
  - reasoningEffort: control reasoning depth for o1/o3 models
- Add OpenAiModelReasoningEffort type with low/medium/high options
- Add comprehensive unit tests for all changes

All changes are backward compatible and follow existing code patterns.

Co-Authored-By: Claude
---
 .fernignore                                   |   4 +
 src/api/types/OpenAiMessageRole.ts            |  20 ++
 src/api/types/OpenAiModel.ts                  |  73 +++++++
 src/api/types/OpenAiModelReasoningEffort.ts   |  15 ++
 src/api/types/index.ts                        |   1 +
 tests/unit/types/OpenAiMessageRole.test.ts    |  73 ++++++++
 tests/unit/types/OpenAiModel.test.ts          | 172 ++++++++++++++++++
 .../types/OpenAiModelReasoningEffort.test.ts  |  44 +++++
 8 files changed, 402 insertions(+)
 create mode 100644 src/api/types/OpenAiModelReasoningEffort.ts
 create mode 100644 tests/unit/types/OpenAiMessageRole.test.ts
 create mode 100644 tests/unit/types/OpenAiModel.test.ts
 create mode 100644 tests/unit/types/OpenAiModelReasoningEffort.test.ts

diff --git a/.fernignore b/.fernignore
index 43ca0d22..deed03ac 100644
--- a/.fernignore
+++ b/.fernignore
@@ -1,3 +1,7 @@
 # Specify files that shouldn't be modified by Fern
 README.md
 
+tests/unit/types/OpenAiMessageRole.test.ts
+tests/unit/types/OpenAiModelReasoningEffort.test.ts
+tests/unit/types/OpenAiModel.test.ts
+tests/custom.test.ts
diff --git a/src/api/types/OpenAiMessageRole.ts b/src/api/types/OpenAiMessageRole.ts
index a5b26da8..f8961a6d 100644
--- a/src/api/types/OpenAiMessageRole.ts
+++ b/src/api/types/OpenAiMessageRole.ts
@@ -1,10 +1,30 @@
 // This file was auto-generated by Fern from our API Definition.
 
+/**
+ * Roles for OpenAI messages.
+ *
+ * Note: The "function" role is deprecated in favor of "tool". Use "tool" for new implementations.
+ * The "developer" role is required for GPT-5.x and o-series models.
+ */
 export const OpenAiMessageRole = {
     Assistant: "assistant",
+    /**
+     * @deprecated The "function" role is deprecated in favor of "tool". Use "tool" for new implementations.
+     * @see https://platform.openai.com/docs/guides/function-calling
+     */
    Function: "function",
     User: "user",
     System: "system",
     Tool: "tool",
+    /**
+     * The "developer" role is used for system-level instructions in GPT-5.x and o-series models.
+     * It provides a way to set high-level instructions that take precedence over user messages.
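+     *
+     * @example
+     * // Editor's sketch (hypothetical): the message shape is illustrative, not the SDK's message type.
+     * const guardrails = { role: OpenAiMessageRole.Developer, content: "Keep answers brief." };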
+     * @see https://platform.openai.com/docs/guides/text-generation
+     */
+    Developer: "developer",
 } as const;
 export type OpenAiMessageRole = (typeof OpenAiMessageRole)[keyof typeof OpenAiMessageRole];
diff --git a/src/api/types/OpenAiModel.ts b/src/api/types/OpenAiModel.ts
index e7d54135..aa5edc26 100644
--- a/src/api/types/OpenAiModel.ts
+++ b/src/api/types/OpenAiModel.ts
@@ -80,4 +80,77 @@ export interface OpenAiModel {
      * @default 0
      */
     numFastTurns?: number;
+    /**
+     * If specified, the system will make a best effort to sample deterministically,
+     * such that repeated requests with the same seed and parameters should return the same result.
+     * Determinism is not guaranteed.
+     *
+     * @see https://platform.openai.com/docs/api-reference/chat/create#chat-create-seed
+     */
+    seed?: number;
+    /**
+     * An alternative to sampling with temperature, called nucleus sampling,
+     * where the model considers the results of the tokens with top_p probability mass.
+     * So 0.1 means only the tokens comprising the top 10% probability mass are considered.
+     *
+     * We generally recommend altering this or temperature but not both.
+     *
+     * @default 1
+     * @see https://platform.openai.com/docs/api-reference/chat/create#chat-create-top_p
+     */
+    topP?: number;
+    /**
+     * Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing
+     * frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.
+     *
+     * @default 0
+     * @see https://platform.openai.com/docs/api-reference/chat/create#chat-create-frequency_penalty
+     */
+    frequencyPenalty?: number;
+    /**
+     * Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they
+     * appear in the text so far, increasing the model's likelihood to talk about new topics.
+     *
+     * @default 0
+     * @see https://platform.openai.com/docs/api-reference/chat/create#chat-create-presence_penalty
+     */
+    presencePenalty?: number;
+    /**
+     * Whether to return log probabilities of the output tokens or not.
+     * If true, returns the log probabilities of each output token returned in the content of message.
+     *
+     * @default false
+     * @see https://platform.openai.com/docs/api-reference/chat/create#chat-create-logprobs
+     */
+    logprobs?: boolean;
+    /**
+     * An integer between 0 and 20 specifying the number of most likely tokens to return at each
+     * token position, each with an associated log probability. logprobs must be set to true if
+     * this parameter is used.
+     *
+     * @see https://platform.openai.com/docs/api-reference/chat/create#chat-create-top_logprobs
+     */
+    topLogprobs?: number;
+    /**
+     * Whether to enable parallel function calling during tool use.
+     * When set to true, the model can call multiple functions in a single response.
+     *
+     * @default true
+     * @see https://platform.openai.com/docs/guides/function-calling#parallel-function-calling
+     */
+    parallelToolCalls?: boolean;
+    /**
+     * Constrains effort on reasoning for reasoning models (o1, o3, etc.).
+     * Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response.
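+     *
+     * @example
+     * // Editor's sketch (hypothetical): a low-effort setting for faster o-series responses.
+     * const model: Partial<OpenAiModel> = { reasoningEffort: "low" };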
+     *
+     * Possible values: "low", "medium", "high"
+     *
+     * @default "medium"
+     * @see https://platform.openai.com/docs/api-reference/chat/create#chat-create-reasoning_effort
+     */
+    reasoningEffort?: Vapi.OpenAiModelReasoningEffort;
 }
diff --git a/src/api/types/OpenAiModelReasoningEffort.ts b/src/api/types/OpenAiModelReasoningEffort.ts
new file mode 100644
index 00000000..ea993fd4
--- /dev/null
+++ b/src/api/types/OpenAiModelReasoningEffort.ts
@@ -0,0 +1,15 @@
+// This file was auto-generated by Fern from our API Definition.
+
+/**
+ * Constrains effort on reasoning for reasoning models (o1, o3, etc.).
+ * Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response.
+ *
+ * @default "medium"
+ * @see https://platform.openai.com/docs/api-reference/chat/create#chat-create-reasoning_effort
+ */
+export const OpenAiModelReasoningEffort = {
+    Low: "low",
+    Medium: "medium",
+    High: "high",
+} as const;
+export type OpenAiModelReasoningEffort = (typeof OpenAiModelReasoningEffort)[keyof typeof OpenAiModelReasoningEffort];
diff --git a/src/api/types/index.ts b/src/api/types/index.ts
index 7513ae41..65b7272c 100644
--- a/src/api/types/index.ts
+++ b/src/api/types/index.ts
@@ -1095,6 +1095,7 @@ export * from "./OpenAiModelFallbackModelsItem.js";
 export * from "./OpenAiModelModel.js";
 export * from "./OpenAiModelPromptCacheRetention.js";
 export * from "./OpenAiModelProvider.js";
+export * from "./OpenAiModelReasoningEffort.js";
 export * from "./OpenAiModelToolStrictCompatibilityMode.js";
 export * from "./OpenAiModelToolsItem.js";
 export * from "./OpenAiTranscriber.js";
diff --git a/tests/unit/types/OpenAiMessageRole.test.ts b/tests/unit/types/OpenAiMessageRole.test.ts
new file mode 100644
index 00000000..ef3c013f
--- /dev/null
+++ b/tests/unit/types/OpenAiMessageRole.test.ts
@@ -0,0 +1,73 @@
+/**
+ * Tests for OpenAiMessageRole enum
+ * VAP-11729: Update server-sdk-typescript SDK with OpenAI spec updates
+ */
+import { describe, it, expect } from "vitest";
+import { OpenAiMessageRole } from "../../../src/api/types/OpenAiMessageRole.js";
+
+describe("OpenAiMessageRole", () => {
+    describe("enum values", () => {
+        it("should have 'assistant' role", () => {
+            expect(OpenAiMessageRole.Assistant).toBe("assistant");
+        });
+
+        it("should have 'function' role (deprecated)", () => {
+            expect(OpenAiMessageRole.Function).toBe("function");
+        });
+
+        it("should have 'user' role", () => {
+            expect(OpenAiMessageRole.User).toBe("user");
+        });
+
+        it("should have 'system' role", () => {
+            expect(OpenAiMessageRole.System).toBe("system");
+        });
+
+        it("should have 'tool' role", () => {
+            expect(OpenAiMessageRole.Tool).toBe("tool");
+        });
+
+        it("should have 'developer' role for GPT-5.x and o-series models", () => {
+            expect(OpenAiMessageRole.Developer).toBe("developer");
+        });
+    });
+
+    describe("type safety", () => {
+        it("should accept valid role values", () => {
+            const roles: OpenAiMessageRole[] = [
+                OpenAiMessageRole.Assistant,
+                OpenAiMessageRole.Function,
+                OpenAiMessageRole.User,
+                OpenAiMessageRole.System,
+                OpenAiMessageRole.Tool,
+                OpenAiMessageRole.Developer,
+            ];
+
+            expect(roles).toHaveLength(6);
+            expect(roles).toContain("assistant");
+            expect(roles).toContain("function");
+            expect(roles).toContain("user");
+            expect(roles).toContain("system");
+            expect(roles).toContain("tool");
+            expect(roles).toContain("developer");
+        });
+
+        it("should have all expected roles defined", () => {
+            const expectedRoles = ["assistant", "function", "user", "system", "tool", "developer"];
"developer"]; + const actualRoles = Object.values(OpenAiMessageRole); + + expect(actualRoles.sort()).toEqual(expectedRoles.sort()); + }); + }); + + describe("backward compatibility", () => { + it("should maintain existing role string values", () => { + // Ensure existing roles haven't changed their string values + expect(OpenAiMessageRole.Assistant).toBe("assistant"); + expect(OpenAiMessageRole.Function).toBe("function"); + expect(OpenAiMessageRole.User).toBe("user"); + expect(OpenAiMessageRole.System).toBe("system"); + expect(OpenAiMessageRole.Tool).toBe("tool"); + }); + }); +}); diff --git a/tests/unit/types/OpenAiModel.test.ts b/tests/unit/types/OpenAiModel.test.ts new file mode 100644 index 00000000..4c87a869 --- /dev/null +++ b/tests/unit/types/OpenAiModel.test.ts @@ -0,0 +1,172 @@ +/** + * Tests for OpenAiModel interface + * VAP-11729: Update server-sdk-typescript SDK with OpenAI spec updates + */ +import { describe, it, expect } from "vitest"; +import type { OpenAiModel } from "../../../src/api/types/OpenAiModel.js"; +import { OpenAiModelReasoningEffort } from "../../../src/api/types/OpenAiModelReasoningEffort.js"; + +describe("OpenAiModel", () => { + describe("new optional parameters", () => { + it("should allow creating a model config with seed parameter", () => { + const model: Partial = { + seed: 42, + }; + + expect(model.seed).toBe(42); + }); + + it("should allow creating a model config with topP parameter", () => { + const model: Partial = { + topP: 0.9, + }; + + expect(model.topP).toBe(0.9); + }); + + it("should allow creating a model config with frequencyPenalty parameter", () => { + const model: Partial = { + frequencyPenalty: 0.5, + }; + + expect(model.frequencyPenalty).toBe(0.5); + }); + + it("should allow creating a model config with presencePenalty parameter", () => { + const model: Partial = { + presencePenalty: 0.3, + }; + + expect(model.presencePenalty).toBe(0.3); + }); + + it("should allow creating a model config with logprobs parameter", () => { + const model: Partial = { + logprobs: true, + }; + + expect(model.logprobs).toBe(true); + }); + + it("should allow creating a model config with topLogprobs parameter", () => { + const model: Partial = { + logprobs: true, + topLogprobs: 5, + }; + + expect(model.topLogprobs).toBe(5); + }); + + it("should allow creating a model config with parallelToolCalls parameter", () => { + const model: Partial = { + parallelToolCalls: false, + }; + + expect(model.parallelToolCalls).toBe(false); + }); + + it("should allow creating a model config with reasoningEffort parameter", () => { + const model: Partial = { + reasoningEffort: OpenAiModelReasoningEffort.High, + }; + + expect(model.reasoningEffort).toBe("high"); + }); + }); + + describe("parameter validation (conceptual)", () => { + it("should accept valid topP values (0 to 1)", () => { + const validValues = [0, 0.1, 0.5, 0.9, 1]; + validValues.forEach((value) => { + const model: Partial = { topP: value }; + expect(model.topP).toBe(value); + }); + }); + + it("should accept valid frequency_penalty values (-2.0 to 2.0)", () => { + const validValues = [-2.0, -1.0, 0, 1.0, 2.0]; + validValues.forEach((value) => { + const model: Partial = { frequencyPenalty: value }; + expect(model.frequencyPenalty).toBe(value); + }); + }); + + it("should accept valid presence_penalty values (-2.0 to 2.0)", () => { + const validValues = [-2.0, -1.0, 0, 1.0, 2.0]; + validValues.forEach((value) => { + const model: Partial = { presencePenalty: value }; + expect(model.presencePenalty).toBe(value); + }); + }); 
+
+        it("should accept valid topLogprobs values (0 to 20)", () => {
+            const validValues = [0, 5, 10, 15, 20];
+            validValues.forEach((value) => {
+                const model: Partial<OpenAiModel> = { topLogprobs: value };
+                expect(model.topLogprobs).toBe(value);
+            });
+        });
+    });
+
+    describe("backward compatibility", () => {
+        it("should maintain existing temperature parameter", () => {
+            const model: Partial<OpenAiModel> = {
+                temperature: 0.7,
+            };
+
+            expect(model.temperature).toBe(0.7);
+        });
+
+        it("should maintain existing maxTokens parameter", () => {
+            const model: Partial<OpenAiModel> = {
+                maxTokens: 1000,
+            };
+
+            expect(model.maxTokens).toBe(1000);
+        });
+
+        it("should allow combining old and new parameters", () => {
+            const model: Partial<OpenAiModel> = {
+                temperature: 0.7,
+                maxTokens: 1000,
+                topP: 0.9,
+                frequencyPenalty: 0.5,
+                presencePenalty: 0.3,
+                seed: 42,
+                logprobs: true,
+                topLogprobs: 5,
+                parallelToolCalls: true,
+                reasoningEffort: OpenAiModelReasoningEffort.Medium,
+            };
+
+            expect(model.temperature).toBe(0.7);
+            expect(model.maxTokens).toBe(1000);
+            expect(model.topP).toBe(0.9);
+            expect(model.frequencyPenalty).toBe(0.5);
+            expect(model.presencePenalty).toBe(0.3);
+            expect(model.seed).toBe(42);
+            expect(model.logprobs).toBe(true);
+            expect(model.topLogprobs).toBe(5);
+            expect(model.parallelToolCalls).toBe(true);
+            expect(model.reasoningEffort).toBe("medium");
+        });
+    });
+
+    describe("reasoning models (o1, o3 series)", () => {
+        it("should support reasoningEffort for o-series models", () => {
+            const lowEffort: Partial<OpenAiModel> = {
+                reasoningEffort: OpenAiModelReasoningEffort.Low,
+            };
+            const mediumEffort: Partial<OpenAiModel> = {
+                reasoningEffort: OpenAiModelReasoningEffort.Medium,
+            };
+            const highEffort: Partial<OpenAiModel> = {
+                reasoningEffort: OpenAiModelReasoningEffort.High,
+            };
+
+            expect(lowEffort.reasoningEffort).toBe("low");
+            expect(mediumEffort.reasoningEffort).toBe("medium");
+            expect(highEffort.reasoningEffort).toBe("high");
+        });
+    });
+});
diff --git a/tests/unit/types/OpenAiModelReasoningEffort.test.ts b/tests/unit/types/OpenAiModelReasoningEffort.test.ts
new file mode 100644
index 00000000..ffd9dedc
--- /dev/null
+++ b/tests/unit/types/OpenAiModelReasoningEffort.test.ts
@@ -0,0 +1,44 @@
+/**
+ * Tests for OpenAiModelReasoningEffort enum
+ * VAP-11729: Update server-sdk-typescript SDK with OpenAI spec updates
+ */
+import { describe, it, expect } from "vitest";
+import { OpenAiModelReasoningEffort } from "../../../src/api/types/OpenAiModelReasoningEffort.js";
+
+describe("OpenAiModelReasoningEffort", () => {
+    describe("enum values", () => {
+        it("should have 'low' reasoning effort level", () => {
+            expect(OpenAiModelReasoningEffort.Low).toBe("low");
+        });
+
+        it("should have 'medium' reasoning effort level (default)", () => {
+            expect(OpenAiModelReasoningEffort.Medium).toBe("medium");
+        });
+
+        it("should have 'high' reasoning effort level", () => {
+            expect(OpenAiModelReasoningEffort.High).toBe("high");
+        });
+    });
+
+    describe("type safety", () => {
+        it("should accept valid reasoning effort values", () => {
+            const efforts: OpenAiModelReasoningEffort[] = [
+                OpenAiModelReasoningEffort.Low,
+                OpenAiModelReasoningEffort.Medium,
+                OpenAiModelReasoningEffort.High,
+            ];
+
+            expect(efforts).toHaveLength(3);
+            expect(efforts).toContain("low");
+            expect(efforts).toContain("medium");
+            expect(efforts).toContain("high");
+        });
+
+        it("should have all expected effort levels defined", () => {
+            const expectedEfforts = ["low", "medium", "high"];
+            const actualEfforts = Object.values(OpenAiModelReasoningEffort);
+
+            expect(actualEfforts.sort()).toEqual(expectedEfforts.sort());
+        });
+    });
+});
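
Usage sketch (editor's illustration, not part of the applied diff): a minimal example combining the new OpenAiModel parameters and the "developer" role, using only the types this patch adds. Import paths mirror the unit tests; a published consumer would import from the SDK package root instead, a complete OpenAiModel config would also set its provider/model fields, and the message object's shape is an assumption rather than the SDK's message type.

import type { OpenAiModel } from "./src/api/types/OpenAiModel.js";
import { OpenAiMessageRole } from "./src/api/types/OpenAiMessageRole.js";
import { OpenAiModelReasoningEffort } from "./src/api/types/OpenAiModelReasoningEffort.js";

// A deterministic-leaning, low-reasoning-effort configuration.
// Partial<OpenAiModel> is used as in the tests above.
const modelConfig: Partial<OpenAiModel> = {
    seed: 42,                                        // best-effort determinism; not guaranteed
    topP: 0.9,                                       // nucleus sampling; alter this or temperature, not both
    frequencyPenalty: 0.5,                           // discourage verbatim repetition
    presencePenalty: 0.3,                            // encourage new topics
    logprobs: true,
    topLogprobs: 5,                                  // requires logprobs: true
    parallelToolCalls: true,
    reasoningEffort: OpenAiModelReasoningEffort.Low, // faster, fewer reasoning tokens
};

// The "developer" role for GPT-5.x / o-series system-level instructions.
// The object shape here is illustrative only.
const developerMessage = {
    role: OpenAiMessageRole.Developer,
    content: "Answer in at most two sentences.",
};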