Add Ollama provider for local LLM support
Reuses OpenAIProvider via Ollama's OpenAI-compatible API at localhost:11434. No API key is needed: install Ollama, pull a model, and set LLM_PROVIDER=ollama. Vision models (llava, llama3.2-vision) are supported for the screenshot fallback.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
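A minimal sketch of the wiring this describes, assuming the provider wraps the official openai npm client; the OllamaProvider class and complete() method below are illustrative, not the actual code in this commit. Only the base URL, default model, and LLM_PROVIDER=ollama setting come from the commit itself.

import OpenAI from "openai";

// Values matching the constants added in this commit.
const OLLAMA_API_BASE_URL = "http://localhost:11434/v1";
const DEFAULT_OLLAMA_MODEL = "llama3.2";

// Illustrative provider: Ollama serves an OpenAI-compatible API locally,
// so the same request shape used for OpenAI works against it unchanged.
export class OllamaProvider {
  private client: OpenAI;

  constructor(private model: string = DEFAULT_OLLAMA_MODEL) {
    // Ollama ignores the API key, but the client requires a non-empty value.
    this.client = new OpenAI({ baseURL: OLLAMA_API_BASE_URL, apiKey: "ollama" });
  }

  async complete(prompt: string): Promise<string> {
    const response = await this.client.chat.completions.create({
      model: this.model,
      messages: [{ role: "user", content: prompt }],
    });
    return response.choices[0]?.message?.content ?? "";
  }
}

With the Ollama server running and a model pulled (for example, ollama pull llama3.2), switching providers is only a matter of setting LLM_PROVIDER=ollama, as the commit message notes.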
@@ -7,6 +7,7 @@
 // API Endpoints
 // ===========================================
 export const GROQ_API_BASE_URL = "https://api.groq.com/openai/v1";
+export const OLLAMA_API_BASE_URL = "http://localhost:11434/v1";
 
 // ===========================================
 // ADB Key Codes
@@ -80,6 +81,7 @@ export const DEFAULT_GROQ_MODEL = "llama-3.3-70b-versatile";
 export const DEFAULT_OPENAI_MODEL = "gpt-4o";
 export const DEFAULT_BEDROCK_MODEL = "us.meta.llama3-3-70b-instruct-v1:0";
 export const DEFAULT_OPENROUTER_MODEL = "anthropic/claude-3.5-sonnet";
+export const DEFAULT_OLLAMA_MODEL = "llama3.2";
 
 // ===========================================
 // Bedrock Model Identifiers
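For the screenshot fallback mentioned in the commit message, the same OpenAI-compatible chat endpoint accepts image parts once a vision-capable model (llava or llama3.2-vision) has been pulled. A hedged sketch; the describeScreenshot helper and its prompt text are hypothetical, not part of this diff:

import OpenAI from "openai";

// Hypothetical helper: send a base64-encoded screenshot to a local vision
// model through Ollama's OpenAI-compatible endpoint.
async function describeScreenshot(pngBase64: string): Promise<string> {
  const client = new OpenAI({
    baseURL: "http://localhost:11434/v1", // OLLAMA_API_BASE_URL from this commit
    apiKey: "ollama",                     // placeholder; not validated locally
  });
  const response = await client.chat.completions.create({
    model: "llama3.2-vision", // must be pulled first: ollama pull llama3.2-vision
    messages: [
      {
        role: "user",
        content: [
          { type: "text", text: "Describe the UI elements visible in this screenshot." },
          { type: "image_url", image_url: { url: `data:image/png;base64,${pngBase64}` } },
        ],
      },
    ],
  });
  return response.choices[0]?.message?.content ?? "";
}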