diff --git a/server/src/agent/llm.ts b/server/src/agent/llm.ts
index 86563ac..371aa75 100644
--- a/server/src/agent/llm.ts
+++ b/server/src/agent/llm.ts
@@ -22,7 +22,8 @@ export interface LLMProvider {
   getAction(
     systemPrompt: string,
     userPrompt: string,
-    imageBase64?: string
+    imageBase64?: string,
+    signal?: AbortSignal
   ): Promise<string>;
 }
 
@@ -381,7 +382,8 @@ export function getLlmProvider(config: LLMConfig): LLMProvider {
     async getAction(
       systemPrompt: string,
       userPrompt: string,
-      imageBase64?: string
+      imageBase64?: string,
+      signal?: AbortSignal
     ): Promise<string> {
       const messages: Array<{ role: string; content: unknown }> = [
        { role: "system", content: systemPrompt },
@@ -418,6 +420,7 @@
          max_tokens: 1024,
          response_format: { type: "json_object" },
        }),
+        signal,
      });
 
      if (!response.ok) {
diff --git a/server/src/agent/loop.ts b/server/src/agent/loop.ts
index 7cdcddf..d856a2e 100644
--- a/server/src/agent/loop.ts
+++ b/server/src/agent/loop.ts
@@ -489,9 +489,11 @@ export async function runAgentLoop(
       rawResponse = await llm.getAction(
         systemPrompt,
         userPrompt,
-        useScreenshot ? screenshot : undefined
+        useScreenshot ? screenshot : undefined,
+        signal
       );
     } catch (err) {
+      if (signal?.aborted) break;
       console.error(
         `[Agent ${sessionId}] LLM error at step ${step + 1}: ${(err as Error).message}`
       );
@@ -510,7 +512,8 @@
         rawResponse = await llm.getAction(
           systemPrompt,
           userPrompt + "\n\nIMPORTANT: Your previous response was not valid JSON. You MUST respond with ONLY a valid JSON object.",
-          useScreenshot ? screenshot : undefined
+          useScreenshot ? screenshot : undefined,
+          signal
         );
         parsed = parseJsonResponse(rawResponse);
       } catch {
@@ -634,6 +637,7 @@
       }
 
       // ── 10. Brief pause for UI to settle ────────────────────
+      if (signal?.aborted) break;
       await new Promise((r) => setTimeout(r, 500));
     }
   } catch (error) {