Add Ollama provider for local LLM support

Reuses OpenAIProvider via Ollama's OpenAI-compatible API at localhost:11434.
No API key needed - just install Ollama, pull a model, and set LLM_PROVIDER=ollama.
Vision models (llava, llama3.2-vision) supported for screenshot fallback.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
Author: Sanju Sivalingam
Date: 2026-02-16 13:28:37 +05:30
Parent: fb3e7bc723
Commit: 75fb725744
7 changed files with 62 additions and 14 deletions
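As the commit message notes, the Ollama provider reuses the existing OpenAI-compatible client rather than introducing a new API surface. The following is a minimal sketch of what that wiring could look like, assuming the openai npm client is used underneath; only the two constants come from this diff, while createClient and the environment-variable handling are illustrative assumptions, not code from the repository.

// Sketch only: how LLM_PROVIDER=ollama could map onto the existing
// OpenAI-compatible client. OLLAMA_API_BASE_URL and DEFAULT_OLLAMA_MODEL
// are the constants added in this commit; everything else is assumed.
import OpenAI from "openai";

const OLLAMA_API_BASE_URL = "http://localhost:11434/v1";
const DEFAULT_OLLAMA_MODEL = "llama3.2";

function createClient(provider: string): OpenAI {
  if (provider === "ollama") {
    // Ollama ignores the API key, but the client requires a non-empty string.
    return new OpenAI({ baseURL: OLLAMA_API_BASE_URL, apiKey: "ollama" });
  }
  return new OpenAI(); // reads OPENAI_API_KEY from the environment
}

const client = createClient(process.env.LLM_PROVIDER ?? "openai");
const completion = await client.chat.completions.create({
  model: DEFAULT_OLLAMA_MODEL,
  messages: [{ role: "user", content: "Reply with OK if you can read this." }],
});
console.log(completion.choices[0].message.content);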


@@ -7,6 +7,7 @@
 // API Endpoints
 // ===========================================
 export const GROQ_API_BASE_URL = "https://api.groq.com/openai/v1";
+export const OLLAMA_API_BASE_URL = "http://localhost:11434/v1";

 // ===========================================
 // ADB Key Codes
@@ -80,6 +81,7 @@ export const DEFAULT_GROQ_MODEL = "llama-3.3-70b-versatile";
 export const DEFAULT_OPENAI_MODEL = "gpt-4o";
 export const DEFAULT_BEDROCK_MODEL = "us.meta.llama3-3-70b-instruct-v1:0";
 export const DEFAULT_OPENROUTER_MODEL = "anthropic/claude-3.5-sonnet";
+export const DEFAULT_OLLAMA_MODEL = "llama3.2";

 // ===========================================
 // Bedrock Model Identifiers
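For the screenshot fallback mentioned in the commit message, a vision-capable model such as llava can receive the screenshot as a data URL through the same OpenAI-compatible endpoint. A rough sketch under that assumption; the file path, prompt, and model choice here are placeholders rather than code from this commit.

// Sketch only: screenshot fallback against Ollama's OpenAI-compatible API
// with a vision model. The screenshot source and prompt are assumptions.
import OpenAI from "openai";
import { readFileSync } from "node:fs";

const client = new OpenAI({
  baseURL: "http://localhost:11434/v1", // OLLAMA_API_BASE_URL from this commit
  apiKey: "ollama", // ignored by Ollama, required by the client
});

// Hypothetical screenshot path; in the real app this would come from the
// device capture step.
const screenshot = readFileSync("screenshot.png").toString("base64");

const response = await client.chat.completions.create({
  model: "llava", // or "llama3.2-vision", per the commit message
  messages: [
    {
      role: "user",
      content: [
        { type: "text", text: "List the visible UI elements on this screen." },
        {
          type: "image_url",
          image_url: { url: `data:image/png;base64,${screenshot}` },
        },
      ],
    },
  ],
});
console.log(response.choices[0].message.content);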