get opencode free models

2026-01-31 14:15:29 +00:00
parent 76e71d1f8a
commit 8196fb0bf6
2 changed files with 178 additions and 9 deletions

View File

@@ -4,6 +4,7 @@ import { testDB } from "./db/client";
import { cleanupExpiredSessions } from "./db/queries";
import { withAuthedLogging, withLogging } from "./logger";
import { routes } from "./routes";
import { initializeFreeModelsCache } from "./routes/ai/opencode";
const DEV = process.argv.find((arg) => ["--dev", "--developer", "-d"].includes(arg.toLowerCase())) != null;
const PORT = process.argv.find((arg) => arg.toLowerCase().startsWith("--port="))?.split("=")[1] || 0;
@@ -121,6 +122,7 @@ const main = async () => {
  console.log(`tnirps (sprint server) listening on ${server.url}`);
  await testDB();
  await initializeFreeModelsCache();
  startSessionCleanup();
};

View File

@@ -5,16 +5,183 @@ export type AIResponse = {
  raw: string;
};
-export const callAI = async (prompt: string): Promise<AIResponse> => {
-  const models = [
-    "opencode/glm-4.7-free",
-    "opencode/kimi-k2.5-free",
-    "opencode/minimax-m2.1-free",
-    "opencode/trinity-large-preview-free",
-  ];
-  const model = models[3]!;
export interface OpencodeModel {
  id: string;
  providerID: string;
  name: string;
  family: string;
  api: {
    id: string;
    url: string;
    npm: string;
  };
  status: string;
  headers: Record<string, string>;
  options: Record<string, unknown>;
  cost: {
    input: number;
    output: number;
    cache: {
      read: number;
      write: number;
    };
  };
  limit: {
    context: number;
    output: number;
    input?: number;
  };
  capabilities: {
    temperature: boolean;
    reasoning: boolean;
    attachment: boolean;
    toolcall: boolean;
    input: {
      text: boolean;
      audio: boolean;
      image: boolean;
      video: boolean;
      pdf: boolean;
    };
    output: {
      text: boolean;
      audio: boolean;
      image: boolean;
      video: boolean;
      pdf: boolean;
    };
    interleaved: boolean | { field: string };
  };
  release_date: string;
  variants: Record<string, unknown>;
}
-  const result = Bun.spawn(["opencode", "run", prompt, "--model", model], {
export interface FreeModel {
  name: string;
  id: string;
}
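
// model ids excluded from the list even when they report zero cost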
const ignore = ["gpt-5-nano"];
function parseOpencodeModelsOutput(output: string): OpencodeModel[] {
  let models: OpencodeModel[] = [];
  const lines = output.split("\n");
  let currentModelId: string | null = null;
  let jsonBuffer: string[] = [];
  for (const line of lines) {
    const trimmed = line.trim();
    // Check if line starts with "opencode/" (model ID header)
    if (trimmed.startsWith("opencode/")) {
      // Save previous model if exists
      if (currentModelId && jsonBuffer.length > 0) {
        try {
          const model = JSON.parse(jsonBuffer.join("\n")) as OpencodeModel;
          models.push(model);
        } catch {
          // skip invalid JSON
        }
      }
      currentModelId = trimmed;
      jsonBuffer = [];
    } else if (trimmed.startsWith("{")) {
      jsonBuffer.push(trimmed);
    } else if (jsonBuffer.length > 0 && trimmed) {
      // Continue accumulating JSON lines
      jsonBuffer.push(trimmed);
    }
  }
  // Don't forget the last model
  if (currentModelId && jsonBuffer.length > 0) {
    try {
      const model = JSON.parse(jsonBuffer.join("\n")) as OpencodeModel;
      models.push(model);
    } catch {
      // skip invalid JSON
    }
  }
  models = models.filter((model) => !ignore.includes(model.id));
  return models;
}
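
// For reference, a sketch of the input shape this parser assumes (the exact
// verbose output format of `opencode models` is an assumption inferred from
// the parsing logic above): a bare "opencode/<id>" header line followed by a
// pretty-printed JSON object, e.g.
//
//   opencode/example-model-free
//   {
//     "id": "example-model-free",
//     "name": "Example Model Free",
//     "cost": { "input": 0, "output": 0, "cache": { "read": 0, "write": 0 } }
//   }
//
// Note that the `as OpencodeModel` cast does not validate the shape: a record
// missing `cost` parses fine here and only fails later, in the free filter.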
// cached models storage
let cachedFreeModels: FreeModel[] | null = null;

// fallback models when opencode CLI fails
const FALLBACK_MODELS: FreeModel[] = [
  { name: "GLM 4.7 Free", id: "glm-4.7-free" },
  { name: "Kimi K2.5 Free", id: "kimi-k2.5-free" },
  { name: "MiniMax M2.1 Free", id: "minimax-m2.1-free" },
  { name: "Trinity Large", id: "trinity-large-preview-free" },
];
// initialize the cache by fetching from opencode CLI
export async function initializeFreeModelsCache(): Promise<void> {
  try {
    const models = await fetchFreeOpencodeModels();
    cachedFreeModels = models;
    console.log(`loaded ${models.length} free opencode models`);
  } catch (error) {
    console.error("failed to initialize free models cache:", error);
    cachedFreeModels = FALLBACK_MODELS;
  }
}
// refresh the cached models
export async function refreshFreeModelsCache(): Promise<FreeModel[]> {
  try {
    const models = await fetchFreeOpencodeModels();
    cachedFreeModels = models;
    console.log(`refreshed ${models.length} free opencode models`);
    return models;
  } catch (error) {
    console.error("failed to refresh free models cache:", error);
    // keep existing cache if refresh fails
    return cachedFreeModels ?? FALLBACK_MODELS;
  }
}
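
// Note the asymmetry with initializeFreeModelsCache above: on failure, refresh
// keeps whatever was cached before, while initialize overwrites the (empty)
// cache with FALLBACK_MODELS.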
// get cached models (returns fallback if not initialized)
export function getCachedFreeModels(): FreeModel[] {
  return cachedFreeModels ?? FALLBACK_MODELS;
}
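
// A hypothetical consumer, as a sketch (the handler name and shape are
// illustrative; this commit does not show the route wiring):
//
//   export const listFreeModels = () => Response.json(getCachedFreeModels());
//
// Serving from the in-memory cache keeps request handling free of CLI spawns;
// only initializeFreeModelsCache and refreshFreeModelsCache pay that cost.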
// internal function to actually fetch from CLI
async function fetchFreeOpencodeModels(): Promise<FreeModel[]> {
  const proc = Bun.spawn({
    cmd: ["opencode", "models", "opencode", "--verbose"],
    stdout: "pipe",
    stderr: "pipe",
  });
  const output = await new Response(proc.stdout).text();
  const exitCode = await proc.exited;
  if (exitCode !== 0) {
    const error = await new Response(proc.stderr).text();
    console.error("opencode models command failed:", error);
    throw new Error("Failed to fetch opencode models");
  }
  const allModels = parseOpencodeModelsOutput(output);
  // filter to free models only (cost.input === 0 && cost.output === 0)
  const freeModels = allModels.filter((model) => model.cost.input === 0 && model.cost.output === 0);
  // map to the expected format { name, id }
  return freeModels.map((model) => ({
    name: model.name,
    id: model.id,
  }));
}
export const callAI = async (prompt: string, model: string): Promise<AIResponse> => {
  if (!model.includes("/")) model = `opencode/${model}`;
  const result = Bun.spawn(["opencode", "run", prompt, "--model", model, "--title", "SPRINT_AUTOMATED"], {
    stdout: "pipe",
    stderr: "pipe",
  });
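
A minimal usage sketch of the new exports, assuming the code above (the chosen prompt and fallback id are illustrative):

const [firstFree] = getCachedFreeModels();
const { raw } = await callAI("hello", firstFree?.id ?? "trinity-large-preview-free");

Bare ids such as "glm-4.7-free" work because callAI prepends the "opencode/" prefix whenever the id has no slash.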