
Commit b8191a3

Refactor
1 parent 8533f1f commit b8191a3

6 files changed, +99 -25 lines changed

apps/sim/app/api/wand/route.ts

Lines changed: 9 additions & 2 deletions
@@ -11,7 +11,7 @@ import { env } from '@/lib/core/config/env'
 import { getCostMultiplier, isBillingEnabled } from '@/lib/core/config/feature-flags'
 import { generateRequestId } from '@/lib/core/utils/request'
 import { verifyWorkspaceMembership } from '@/app/api/workflows/utils'
-import { extractResponseText, parseResponsesUsage } from '@/providers/responses-utils'
+import { extractResponseText, parseResponsesUsage } from '@/providers/openai/utils'
 import { getModelPricing } from '@/providers/utils'
 
 export const dynamic = 'force-dynamic'

@@ -386,12 +386,19 @@ Use this context to calculate relative dates like "yesterday", "last week", "beg
           throw new Error(parsed?.error?.message || 'Responses stream error')
         }
 
-        if (eventType === 'response.output_text.delta') {
+        if (
+          eventType === 'response.output_text.delta' ||
+          eventType === 'response.output_json.delta'
+        ) {
           let content = ''
           if (typeof parsed.delta === 'string') {
             content = parsed.delta
           } else if (parsed.delta && typeof parsed.delta.text === 'string') {
             content = parsed.delta.text
+          } else if (parsed.delta && parsed.delta.json !== undefined) {
+            content = JSON.stringify(parsed.delta.json)
+          } else if (parsed.json !== undefined) {
+            content = JSON.stringify(parsed.json)
           } else if (typeof parsed.text === 'string') {
             content = parsed.text
           }
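
As a rough illustration of the widened delta handling above, here is a self-contained TypeScript sketch of the branch order; the branch precedence mirrors the code in the diff, but the concrete event payload shapes (especially for response.output_json.delta) are assumptions for illustration, not taken from the diff or the API reference.

// Sketch only: hypothetical event shapes, same precedence as the branches above.
type ResponsesStreamEvent = {
  type: string
  delta?: string | { text?: string; json?: unknown }
  json?: unknown
  text?: string
}

function contentFromEvent(parsed: ResponsesStreamEvent): string {
  if (typeof parsed.delta === 'string') return parsed.delta // plain text delta
  if (parsed.delta && typeof parsed.delta.text === 'string') return parsed.delta.text
  if (parsed.delta && parsed.delta.json !== undefined) return JSON.stringify(parsed.delta.json)
  if (parsed.json !== undefined) return JSON.stringify(parsed.json)
  if (typeof parsed.text === 'string') return parsed.text
  return ''
}

// contentFromEvent({ type: 'response.output_text.delta', delta: 'Hi' })               // 'Hi'
// contentFromEvent({ type: 'response.output_json.delta', delta: { json: { a: 1 } } }) // '{"a":1}'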

apps/sim/lib/copilot/chat-title.ts

Lines changed: 1 addition & 1 deletion
@@ -1,6 +1,6 @@
 import { createLogger } from '@sim/logger'
 import { env } from '@/lib/core/config/env'
-import { extractResponseText } from '@/providers/responses-utils'
+import { extractResponseText } from '@/providers/openai/utils'
 
 const logger = createLogger('SimAgentUtils')
 
apps/sim/providers/azure-openai/index.ts

Lines changed: 1 addition & 1 deletion
@@ -2,7 +2,7 @@ import { createLogger } from '@sim/logger'
 import { env } from '@/lib/core/config/env'
 import type { StreamingExecution } from '@/executor/types'
 import { getProviderDefaultModel, getProviderModels } from '@/providers/models'
-import { executeResponsesProviderRequest } from '@/providers/responses-provider'
+import { executeResponsesProviderRequest } from '@/providers/openai/core'
 import type { ProviderConfig, ProviderRequest, ProviderResponse } from '@/providers/types'
 
 const logger = createLogger('AzureOpenAIProvider')

apps/sim/providers/openai/core.ts

Lines changed: 65 additions & 14 deletions
@@ -1,6 +1,14 @@
 import type { Logger } from '@sim/logger'
 import type { StreamingExecution } from '@/executor/types'
 import { MAX_TOOL_ITERATIONS } from '@/providers'
+import type { Message, ProviderRequest, ProviderResponse, TimeSegment } from '@/providers/types'
+import {
+  calculateCost,
+  prepareToolExecution,
+  prepareToolsWithUsageControl,
+  trackForcedToolUsage,
+} from '@/providers/utils'
+import { executeTool } from '@/tools'
 import {
   buildResponsesInputFromMessages,
   convertResponseOutputToInputItems,

@@ -12,19 +20,59 @@ import {
   type ResponsesInputItem,
   type ResponsesToolCall,
   toResponsesToolChoice,
-} from '@/providers/responses-utils'
-import type { Message, ProviderRequest, ProviderResponse, TimeSegment } from '@/providers/types'
-import {
-  calculateCost,
-  prepareToolExecution,
-  prepareToolsWithUsageControl,
-  trackForcedToolUsage,
-} from '@/providers/utils'
-import { executeTool } from '@/tools'
+} from './utils'
 
 type PreparedTools = ReturnType<typeof prepareToolsWithUsageControl>
 type ToolChoice = PreparedTools['toolChoice']
 
+/**
+ * Recursively enforces OpenAI strict mode requirements on a JSON schema.
+ * - Sets additionalProperties: false on all object types.
+ * - Ensures required includes ALL property keys.
+ */
+function enforceStrictSchema(schema: any): any {
+  if (!schema || typeof schema !== 'object') return schema
+
+  const result = { ...schema }
+
+  // If this is an object type, enforce strict requirements
+  if (result.type === 'object') {
+    result.additionalProperties = false
+
+    // Recursively process properties and ensure required includes all keys
+    if (result.properties && typeof result.properties === 'object') {
+      const propKeys = Object.keys(result.properties)
+      result.required = propKeys // Strict mode requires ALL properties
+      result.properties = Object.fromEntries(
+        Object.entries(result.properties).map(([key, value]) => [key, enforceStrictSchema(value)])
+      )
+    }
+  }
+
+  // Handle array items
+  if (result.type === 'array' && result.items) {
+    result.items = enforceStrictSchema(result.items)
+  }
+
+  // Handle anyOf, oneOf, allOf
+  for (const keyword of ['anyOf', 'oneOf', 'allOf']) {
+    if (Array.isArray(result[keyword])) {
+      result[keyword] = result[keyword].map(enforceStrictSchema)
+    }
+  }
+
+  // Handle $defs / definitions
+  for (const defKey of ['$defs', 'definitions']) {
+    if (result[defKey] && typeof result[defKey] === 'object') {
+      result[defKey] = Object.fromEntries(
+        Object.entries(result[defKey]).map(([key, value]) => [key, enforceStrictSchema(value)])
+      )
+    }
+  }
+
+  return result
+}
+
 export interface ResponsesProviderConfig {
   providerId: string
   providerLabel: string
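
A quick before/after to show what the new enforceStrictSchema helper does to a nested schema; the sample schema below is made up for illustration and does not come from the commit.

// Illustrative input only:
const loose = {
  type: 'object',
  properties: {
    name: { type: 'string' },
    address: {
      type: 'object',
      properties: { city: { type: 'string' } },
    },
  },
  required: ['name'],
}

// enforceStrictSchema(loose) yields (key order aside):
// {
//   type: 'object',
//   additionalProperties: false,
//   required: ['name', 'address'],        // ALL keys, not just 'name'
//   properties: {
//     name: { type: 'string' },
//     address: {
//       type: 'object',
//       additionalProperties: false,
//       required: ['city'],
//       properties: { city: { type: 'string' } },
//     },
//   },
// }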

@@ -97,15 +145,18 @@ export async function executeResponsesProviderRequest(
   }
 
   if (request.responseFormat) {
+    const isStrict = request.responseFormat.strict !== false
+    const rawSchema = request.responseFormat.schema || request.responseFormat
+    // OpenAI strict mode requires additionalProperties: false on ALL nested objects
+    const cleanedSchema = isStrict ? enforceStrictSchema(rawSchema) : rawSchema
+
     basePayload.text = {
       ...(basePayload.text ?? {}),
       format: {
         type: 'json_schema',
-        json_schema: {
-          name: request.responseFormat.name || 'response_schema',
-          schema: request.responseFormat.schema || request.responseFormat,
-          strict: request.responseFormat.strict !== false,
-        },
+        name: request.responseFormat.name || 'response_schema',
+        schema: cleanedSchema,
+        strict: isStrict,
       },
     }
 
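
Net effect on the request body built here: the json_schema wrapper is dropped, so name, schema, and strict now sit directly under text.format. A minimal sketch of the resulting payload, with placeholder values rather than real request data:

// Before this commit the code produced:
//   text: { format: { type: 'json_schema', json_schema: { name, schema, strict } } }
// After, it produces (placeholder values):
const textConfig = {
  format: {
    type: 'json_schema',
    name: 'response_schema',          // request.responseFormat.name fallback
    schema: { type: 'object', additionalProperties: false, required: [], properties: {} },
    strict: true,                     // schema run through enforceStrictSchema when strict
  },
}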

apps/sim/providers/openai/index.ts

Lines changed: 1 addition & 1 deletion
@@ -1,8 +1,8 @@
 import { createLogger } from '@sim/logger'
 import type { StreamingExecution } from '@/executor/types'
 import { getProviderDefaultModel, getProviderModels } from '@/providers/models'
-import { executeResponsesProviderRequest } from '@/providers/responses-provider'
 import type { ProviderConfig, ProviderRequest, ProviderResponse } from '@/providers/types'
+import { executeResponsesProviderRequest } from './core'
 
 const logger = createLogger('OpenAIProvider')
 const responsesEndpoint = 'https://api.openai.com/v1/responses'

apps/sim/providers/openai/utils.ts

Lines changed: 22 additions & 6 deletions
@@ -146,12 +146,21 @@ function extractTextFromMessageItem(item: any): string {
 
   const textParts: string[] = []
   for (const part of item.content) {
-    if (
-      part &&
-      (part.type === 'output_text' || part.type === 'text') &&
-      typeof part.text === 'string'
-    ) {
+    if (!part || typeof part !== 'object') {
+      continue
+    }
+
+    if ((part.type === 'output_text' || part.type === 'text') && typeof part.text === 'string') {
       textParts.push(part.text)
+      continue
+    }
+
+    if (part.type === 'output_json') {
+      if (typeof part.text === 'string') {
+        textParts.push(part.text)
+      } else if (part.json !== undefined) {
+        textParts.push(JSON.stringify(part.json))
+      }
     }
   }
 
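
For context, a hypothetical output message item with an output_json content part, and what the updated loop above would collect from it; the item shape is illustrative only, not taken from the API reference.

// Hypothetical item:
const item = {
  type: 'message',
  content: [
    { type: 'output_text', text: 'Result: ' },
    { type: 'output_json', json: { total: 42 } },
  ],
}

// The loop pushes 'Result: ' for the output_text part and
// JSON.stringify({ total: 42 }) === '{"total":42}' for the output_json part,
// so textParts ends up as ['Result: ', '{"total":42}'] before the surrounding function joins it.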

@@ -412,12 +421,19 @@ export function createReadableStreamFromResponses(
           return
         }
 
-        if (eventType === 'response.output_text.delta') {
+        if (
+          eventType === 'response.output_text.delta' ||
+          eventType === 'response.output_json.delta'
+        ) {
           let deltaText = ''
           if (typeof event.delta === 'string') {
             deltaText = event.delta
           } else if (event.delta && typeof event.delta.text === 'string') {
             deltaText = event.delta.text
+          } else if (event.delta && event.delta.json !== undefined) {
+            deltaText = JSON.stringify(event.delta.json)
+          } else if (event.json !== undefined) {
+            deltaText = JSON.stringify(event.json)
           } else if (typeof event.text === 'string') {
             deltaText = event.text
           }
