Add Ollama provider for local LLM support

Reuses OpenAIProvider via Ollama's OpenAI-compatible API at localhost:11434.
No API key needed - just install Ollama, pull a model, and set LLM_PROVIDER=ollama.
Vision models (llava, llama3.2-vision) supported for screenshot fallback.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
Sanju Sivalingam
2026-02-16 13:28:37 +05:30
parent fb3e7bc723
commit 75fb725744
7 changed files with 62 additions and 14 deletions

View File

@@ -13,6 +13,7 @@ import {
DEFAULT_GROQ_MODEL,
DEFAULT_OPENAI_MODEL,
DEFAULT_BEDROCK_MODEL,
DEFAULT_OLLAMA_MODEL,
DEFAULT_MAX_RETRIES,
DEFAULT_STUCK_THRESHOLD,
DEFAULT_MAX_ELEMENTS,
@@ -56,7 +57,7 @@ export const Config = {
// Streaming responses
STREAMING_ENABLED: env("STREAMING_ENABLED", String(DEFAULT_STREAMING_ENABLED)) === "true",
// LLM Provider: "groq", "openai", "bedrock", or "openrouter"
// LLM Provider: "groq", "openai", "bedrock", "openrouter", or "ollama"
LLM_PROVIDER: env("LLM_PROVIDER", "groq"),
// Groq Configuration
@@ -75,11 +76,16 @@ export const Config = {
OPENROUTER_API_KEY: env("OPENROUTER_API_KEY"),
OPENROUTER_MODEL: env("OPENROUTER_MODEL", "anthropic/claude-3.5-sonnet"),
// Ollama Configuration (local LLMs, no API key needed)
// NOTE(review): this literal duplicates OLLAMA_API_BASE_URL exported from
// constants — consider importing that constant as the default to keep the
// two in sync; verify the import hunk in this file before changing.
// The "/v1" suffix targets Ollama's OpenAI-compatible endpoint.
OLLAMA_BASE_URL: env("OLLAMA_BASE_URL", "http://localhost:11434/v1"),
// Model tag to run (e.g. "llama3.2"); must already be pulled locally.
OLLAMA_MODEL: env("OLLAMA_MODEL", DEFAULT_OLLAMA_MODEL),
/**
 * Resolve the model name for the currently configured LLM provider.
 * Any unrecognized LLM_PROVIDER value falls back to the OpenAI model.
 */
getModel(): string {
  switch (Config.LLM_PROVIDER) {
    case "groq":
      return Config.GROQ_MODEL;
    case "bedrock":
      return Config.BEDROCK_MODEL;
    case "openrouter":
      return Config.OPENROUTER_MODEL;
    case "ollama":
      return Config.OLLAMA_MODEL;
    default:
      // "openai" and anything unknown resolve to the OpenAI model.
      return Config.OPENAI_MODEL;
  }
},

View File

@@ -7,6 +7,7 @@
// API Endpoints
// ===========================================
// Groq's OpenAI-compatible endpoint.
export const GROQ_API_BASE_URL = "https://api.groq.com/openai/v1";
// Default local Ollama endpoint; "/v1" is Ollama's OpenAI-compatible path.
export const OLLAMA_API_BASE_URL = "http://localhost:11434/v1";
// ===========================================
// ADB Key Codes
@@ -80,6 +81,7 @@ export const DEFAULT_GROQ_MODEL = "llama-3.3-70b-versatile";
// Per-provider default models; each is overridable via its *_MODEL env var.
export const DEFAULT_OPENAI_MODEL = "gpt-4o";
export const DEFAULT_BEDROCK_MODEL = "us.meta.llama3-3-70b-instruct-v1:0";
export const DEFAULT_OPENROUTER_MODEL = "anthropic/claude-3.5-sonnet";
// Ollama model tag; must be pulled locally (e.g. `ollama pull llama3.2`).
export const DEFAULT_OLLAMA_MODEL = "llama3.2";
// ===========================================
// Bedrock Model Identifiers

View File

@@ -1,6 +1,6 @@
/**
* LLM Provider module for DroidClaw.
* Supports OpenAI, Groq, AWS Bedrock, and OpenRouter (via Vercel AI SDK).
* Supports OpenAI, Groq, AWS Bedrock, OpenRouter (via Vercel AI SDK), and Ollama (local).
*
* Phase 3: Real multimodal vision (image content parts)
* Phase 4A: Multi-turn conversation memory (ChatMessage[] interface)
@@ -20,6 +20,7 @@ import { z } from "zod";
import { Config } from "./config.js";
import {
GROQ_API_BASE_URL,
OLLAMA_API_BASE_URL,
BEDROCK_ANTHROPIC_MODELS,
BEDROCK_META_MODELS,
} from "./constants.js";
@@ -265,6 +266,14 @@ class OpenAIProvider implements LLMProvider {
});
this.model = Config.GROQ_MODEL;
this.capabilities = { supportsImages: false, supportsStreaming: true };
} else if (Config.LLM_PROVIDER === "ollama") {
this.client = new OpenAI({
apiKey: "ollama", // required by the SDK but ignored by Ollama
baseURL: Config.OLLAMA_BASE_URL,
});
this.model = Config.OLLAMA_MODEL;
// Vision models (llava, llama3.2-vision, etc.) support images
this.capabilities = { supportsImages: true, supportsStreaming: true };
} else {
this.client = new OpenAI({ apiKey: Config.OPENAI_API_KEY });
this.model = Config.OPENAI_MODEL;
@@ -646,5 +655,6 @@ export function getLlmProvider(): LLMProvider {
if (Config.LLM_PROVIDER === "openrouter") {
return new OpenRouterProvider();
}
// OpenAI, Groq, and Ollama all use OpenAI-compatible API
return new OpenAIProvider();
}