@@ -17,7 +17,8 @@ function extractRequestMetadata(params: { body: unknown; logger: Logger }) {
     logger.warn({ body }, 'Received request without client_id')
   }
   const rawRunId = (body as any)?.codebuff_metadata?.run_id
-  const clientRequestId: string | null = typeof rawRunId === 'string' ? rawRunId : null
+  const clientRequestId: string | null =
+    typeof rawRunId === 'string' ? rawRunId : null
   if (!clientRequestId) {
     logger.warn({ body }, 'Received request without run_id')
   }
@@ -40,9 +41,16 @@ type OpenAIUsage = {
   cost_details?: { upstream_inference_cost?: number | null } | null
 }
 
-function getOpenAIRatesPerMTokens(model: string): { inUsd: number; outUsd: number } {
+function getOpenAIRatesPerMTokens(model: string): {
+  inUsd: number
+  outUsd: number
+} {
   const m = model.toLowerCase()
-  if (m.includes('gpt-4o-mini') || m.includes('4o-mini') || m.includes('o4-mini')) {
+  if (
+    m.includes('gpt-4o-mini') ||
+    m.includes('4o-mini') ||
+    m.includes('o4-mini')
+  ) {
     return { inUsd: 0.15, outUsd: 0.6 }
   }
   if (m.includes('gpt-4o')) {
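Side note on ordering: the mini-model checks must run before the plain `gpt-4o` check, since `'gpt-4o-mini'.includes('gpt-4o')` is true and the mini models would otherwise pick up the larger model's rates. The `computeCostDollars` function these rates presumably feed is not shown in this diff; a minimal sketch, assuming it simply multiplies token counts by the per-million-token rates above (hypothetical, not the repo's actual implementation, which may also account for cached tokens or `upstream_inference_cost`):

```ts
// Hypothetical sketch — the real computeCostDollars is not in this diff.
type UsageLike = { prompt_tokens?: number | null; completion_tokens?: number | null }

function computeCostDollarsSketch(usage: UsageLike, model: string): number {
  const { inUsd, outUsd } = getOpenAIRatesPerMTokens(model)
  const inputTokens = usage.prompt_tokens ?? 0
  const outputTokens = usage.completion_tokens ?? 0
  // Rates are quoted per 1M tokens, so scale down by 1e6.
  return (inputTokens * inUsd + outputTokens * outUsd) / 1_000_000
}
```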
@@ -98,7 +106,8 @@ export async function handleOpenAIStream({
   openaiBody.stream_options = streamOptions
 
   // Transform max_tokens to max_completion_tokens
-  openaiBody.max_completion_tokens = openaiBody.max_tokens
+  openaiBody.max_completion_tokens =
+    openaiBody.max_completion_tokens ?? openaiBody.max_tokens
   delete (openaiBody as any).max_tokens
 
   // Remove fields that OpenAI doesn't support
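The `??` fallback is the substantive fix in this hunk: the old line unconditionally overwrote `max_completion_tokens` with `max_tokens`, discarding an explicitly set value (or clobbering it with `undefined` when `max_tokens` was absent). A standalone illustration of the new behavior (not from the repo):

```ts
// Caller already set max_completion_tokens; max_tokens is the legacy field.
const body: Record<string, unknown> = { max_completion_tokens: 1024, max_tokens: 256 }
body.max_completion_tokens = body.max_completion_tokens ?? body.max_tokens
delete body.max_tokens
// body is now { max_completion_tokens: 1024 } — the explicit value survives.
// Before this change it would have been overwritten to 256.
```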
@@ -118,7 +127,9 @@ export async function handleOpenAIStream({
   })
 
   if (!response.ok) {
-    throw new Error(`OpenAI API error: ${response.status} ${response.statusText} ${await response.text()}`)
+    throw new Error(
+      `OpenAI API error: ${response.status} ${response.statusText} ${await response.text()}`,
+    )
   }
 
   const reader = response.body?.getReader?.()
@@ -135,12 +146,18 @@ export async function handleOpenAIStream({
       const decoder = new TextDecoder()
       let buffer = ''
 
-      controller.enqueue(new TextEncoder().encode(`: connected ${new Date().toISOString()}\n`))
+      controller.enqueue(
+        new TextEncoder().encode(`: connected ${new Date().toISOString()}\n`),
+      )
 
       heartbeatInterval = setInterval(() => {
         if (!clientDisconnected) {
           try {
-            controller.enqueue(new TextEncoder().encode(`: heartbeat ${new Date().toISOString()}\n\n`))
+            controller.enqueue(
+              new TextEncoder().encode(
+                `: heartbeat ${new Date().toISOString()}\n\n`,
+              ),
+            )
           } catch {}
         }
       }, 30000)
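Background on the `:`-prefixed lines (general SSE behavior, not something introduced by this diff): in Server-Sent Events, a line starting with a colon is a comment that conforming clients discard, so the `connected` marker and the 30-second heartbeat keep proxies and load balancers from idling out the connection without injecting fake events into the stream. On the wire the output would look roughly like this (timestamps and payload illustrative):

```
: connected 2024-01-01T00:00:00.000Z
: heartbeat 2024-01-01T00:00:30.000Z

data: {"choices":[{"delta":{"content":"Hi"}}]}
```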
@@ -176,7 +193,9 @@ export async function handleOpenAIStream({
           try {
             controller.enqueue(new TextEncoder().encode(line))
           } catch (error) {
-            logger.warn('Client disconnected during stream, continuing for billing')
+            logger.warn(
+              'Client disconnected during stream, continuing for billing',
+            )
             clientDisconnected = true
           }
         }
@@ -192,7 +211,10 @@ export async function handleOpenAIStream({
         if (!clientDisconnected) {
           controller.error(error)
         } else {
-          logger.warn(getErrorObject(error), 'Error after client disconnect in OpenAI stream')
+          logger.warn(
+            getErrorObject(error),
+            'Error after client disconnect in OpenAI stream',
+          )
         }
       } finally {
         clearInterval(heartbeatInterval)
@@ -201,7 +223,10 @@ export async function handleOpenAIStream({
     cancel() {
       clearInterval(heartbeatInterval)
       clientDisconnected = true
-      logger.warn({ clientDisconnected, state }, 'Client cancelled stream, continuing OpenAI consumption for billing')
+      logger.warn(
+        { clientDisconnected, state },
+        'Client cancelled stream, continuing OpenAI consumption for billing',
+      )
     },
   })
 
@@ -243,25 +268,36 @@ async function handleOpenAILine({
   try {
     obj = JSON.parse(raw)
   } catch (error) {
-    logger.warn(`Received non-JSON OpenAI response: ${JSON.stringify(getErrorObject(error), null, 2)}`)
+    logger.warn(
+      `Received non-JSON OpenAI response: ${JSON.stringify(getErrorObject(error), null, 2)}`,
+    )
     return { state, outgoingLine: line }
   }
 
   // Accumulate text
   try {
-    const choice = Array.isArray(obj.choices) && obj.choices.length ? obj.choices[0] : undefined
+    const choice =
+      Array.isArray(obj.choices) && obj.choices.length
+        ? obj.choices[0]
+        : undefined
     const delta = choice?.delta
     if (delta) {
       if (typeof delta.content === 'string') state.responseText += delta.content
       // OpenAI may not provide reasoning delta in standard chat completions; keep parity
-      if (typeof delta.reasoning === 'string') state.reasoningText += delta.reasoning
+      if (typeof delta.reasoning === 'string')
+        state.reasoningText += delta.reasoning
     }
   } catch {}
 
   // If usage present, it's the final chunk. Compute cost, log, and consume credits.
   if (obj && obj.usage) {
     const usage: OpenAIUsage = obj.usage
-    const model: string = typeof obj.model === 'string' ? obj.model : (typeof (request as any)?.model === 'string' ? (request as any).model : '')
+    const model: string =
+      typeof obj.model === 'string'
+        ? obj.model
+        : typeof (request as any)?.model === 'string'
+          ? (request as any).model
+          : ''
 
     const cost = computeCostDollars(usage, model)
     obj.usage.cost = cost
@@ -288,7 +324,10 @@ async function handleOpenAILine({
         logger,
       })
       if (!success) {
-        logger.error({ request }, 'Failed to insert message into BigQuery (OpenAI)')
+        logger.error(
+          { request },
+          'Failed to insert message into BigQuery (OpenAI)',
+        )
       }
     })
 
@@ -307,7 +346,8 @@ async function handleOpenAILine({
     inputTokens: obj.usage.prompt_tokens ?? 0,
     cacheCreationInputTokens: null,
     cacheReadInputTokens: obj.usage.prompt_tokens_details?.cached_tokens ?? 0,
-    reasoningTokens: obj.usage.completion_tokens_details?.reasoning_tokens ?? null,
+    reasoningTokens:
+      obj.usage.completion_tokens_details?.reasoning_tokens ?? null,
     outputTokens: obj.usage.completion_tokens ?? 0,
     logger,
   })
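For reference, the usage-bearing final chunk that drives this accounting only arrives when the request asks for it, presumably via the `streamOptions` assigned earlier (OpenAI's chat-completions streaming API requires `stream_options: { include_usage: true }` and then emits a last chunk with an empty `choices` array). An illustrative shape, with made-up values but field names matching the ones read above:

```ts
// Illustrative final chunk — values are invented; field names follow
// prompt_tokens_details / completion_tokens_details as consumed above.
const finalChunk = {
  model: 'gpt-4o-mini',
  choices: [],
  usage: {
    prompt_tokens: 1200,
    completion_tokens: 350,
    prompt_tokens_details: { cached_tokens: 1024 },
    completion_tokens_details: { reasoning_tokens: 0 },
  },
}
```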