From 1de514b9d6c2889d34a8cc88fdc6037460678da1 Mon Sep 17 00:00:00 2001 From: badhra-ajaz Date: Wed, 11 Mar 2026 10:08:24 +0530 Subject: [PATCH 1/2] fix(logging): replace console.warn with logger in stars API route The stars API route used console.warn() for error logging while every other API route in the codebase uses createLogger from @sim/logger. This makes warning output from this route invisible to the structured logging pipeline. Replaced both console.warn() calls with logger.warn() to match the pattern used across all other API routes. --- apps/sim/app/api/stars/route.ts | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/apps/sim/app/api/stars/route.ts b/apps/sim/app/api/stars/route.ts index 97106b13842..72378b6744c 100644 --- a/apps/sim/app/api/stars/route.ts +++ b/apps/sim/app/api/stars/route.ts @@ -1,6 +1,9 @@ import { NextResponse } from 'next/server' +import { createLogger } from '@sim/logger' import { env } from '@/lib/core/config/env' +const logger = createLogger('StarsAPI') + function formatStarCount(num: number): string { if (num < 1000) return String(num) const formatted = (Math.round(num / 100) / 10).toFixed(1) @@ -22,14 +25,14 @@ export async function GET() { }) if (!response.ok) { - console.warn('GitHub API request failed:', response.status) + logger.warn('GitHub API request failed:', response.status) return NextResponse.json({ stars: formatStarCount(19400) }) } const data = await response.json() return NextResponse.json({ stars: formatStarCount(Number(data?.stargazers_count ?? 
19400)) }) } catch (error) { - console.warn('Error fetching GitHub stars:', error) + logger.warn('Error fetching GitHub stars:', error) return NextResponse.json({ stars: formatStarCount(19400) }) } } From 0ecde3638f93996b227cc867650e69065762040b Mon Sep 17 00:00:00 2001 From: badhra-ajaz Date: Thu, 12 Mar 2026 11:12:29 +0530 Subject: [PATCH 2/2] fix(providers): add 60s timeout to all OpenAI provider fetch requests MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Currently, fetch() calls to OpenAI endpoints in the Responses API provider have no timeout, which means HTTP requests can hang indefinitely on network issues, server unresponsiveness, or slow model responses. This causes: - Simulations stuck waiting forever for LLM responses - Resource exhaustion when multiple sims run concurrently - Poor user experience (no error feedback, hanging UI) - Wasted compute resources on hung HTTP requests This adds a 60-second timeout using AbortSignal.timeout() while preserving any existing abort signals via AbortSignal.any(). **PeakInfer Issue:** Missing timeout on LLM API HTTP requests **Impact:** Prevents indefinite hangs and improves reliability **Category:** Reliability + Latency Changes: - Added 60s timeout to postResponses() fetch (line 265-268) - Added 60s timeout to streaming fetch (line 293-296) - Added 60s timeout to final streaming fetch after tools (line 718-721) - Preserves existing abortSignal functionality via AbortSignal.any() - Applies to all OpenAI-compatible providers (OpenAI, Azure, etc.) 
This follows PeakInfer best practices for production LLM systems: - Prevents resource exhaustion from hung requests - Enables faster error detection and recovery - Improves system resilience under network issues - 60s timeout balances patience for long responses vs system health 🤖 Generated with PeakInfer LLM inference optimization --- apps/sim/providers/openai/core.ts | 24 +++++++++++++++++++++--- 1 file changed, 21 insertions(+), 3 deletions(-) diff --git a/apps/sim/providers/openai/core.ts b/apps/sim/providers/openai/core.ts index 139e12eaa3d..afe519d69d6 100644 --- a/apps/sim/providers/openai/core.ts +++ b/apps/sim/providers/openai/core.ts @@ -261,11 +261,17 @@ export async function executeResponsesProviderRequest( const postResponses = async ( body: Record<string, any> ): Promise<any> => { + // Create a 60s timeout signal and combine with any existing abort signal + const timeoutSignal = AbortSignal.timeout(60000) // 60 seconds + const combinedSignal = request.abortSignal + ? AbortSignal.any([timeoutSignal, request.abortSignal]) + : timeoutSignal + const response = await fetch(config.endpoint, { method: 'POST', headers: config.headers, body: JSON.stringify(body), - signal: request.abortSignal, + signal: combinedSignal, }) if (!response.ok) { @@ -283,11 +289,17 @@ export async function executeResponsesProviderRequest( if (request.stream && (!tools || tools.length === 0)) { logger.info(`Using streaming response for ${config.providerLabel} request`) + // Create a 60s timeout signal and combine with any existing abort signal + const timeoutSignal = AbortSignal.timeout(60000) // 60 seconds + const combinedSignal = request.abortSignal + ? 
AbortSignal.any([timeoutSignal, request.abortSignal]) + : timeoutSignal + const streamResponse = await fetch(config.endpoint, { method: 'POST', headers: config.headers, body: JSON.stringify(createRequestBody(initialInput, { stream: true })), - signal: request.abortSignal, + signal: combinedSignal, }) if (!streamResponse.ok) { @@ -702,11 +714,17 @@ export async function executeResponsesProviderRequest( } } + // Create a 60s timeout signal and combine with any existing abort signal + const timeoutSignal = AbortSignal.timeout(60000) // 60 seconds + const combinedSignal = request.abortSignal + ? AbortSignal.any([timeoutSignal, request.abortSignal]) + : timeoutSignal + const streamResponse = await fetch(config.endpoint, { method: 'POST', headers: config.headers, body: JSON.stringify(createRequestBody(currentInput, streamOverrides)), - signal: request.abortSignal, + signal: combinedSignal, }) if (!streamResponse.ok) {