make it start working again
Some checks failed
ci/woodpecker/cron/ci Pipeline failed

This commit is contained in:
Madison Grubb
2025-12-11 23:13:07 -05:00
parent dc9ec367a0
commit 96480a351f
10 changed files with 1054 additions and 439 deletions

View File

@@ -1,47 +1,47 @@
// Endpoint URL for the LLM backend (a direct Ollama server or an Open WebUI proxy).
const OLLAMA_API_URL = process.env.OLLAMA_API_URL;
// Optional bearer token; its presence is used elsewhere to detect an Open WebUI proxy.
const OLLAMA_API_KEY = process.env.OLLAMA_API_KEY;
// Default model name; `||` means an unset OR empty env var falls back to "gemma3n:e4b".
export const OLLAMA_MODEL = process.env.OLLAMA_MODEL || "gemma3n:e4b";
// Resolve after roughly `ms` milliseconds (promise-wrapped setTimeout).
async function sleep(ms) {
  return new Promise((done) => {
    setTimeout(done, ms);
  });
}
// Utility: strip common markdown artifacts from model output and collapse
// all whitespace (including newlines) into single spaces.
// Note: the previous text carried this replace chain twice back-to-back — a
// leftover from an interleaved diff; a single pass is the intended behavior.
function cleanText(str) {
  return str
    .replace(/^#+\s*/gm, "") // remove ATX headers ("# ", "## ", ...)
    .replace(/\*\*(.*?)\*\*/g, "$1") // unwrap **bold** markers
    .replace(/[*_`]/g, "") // remove stray emphasis/backtick characters
    .replace(/\s+/g, " ") // normalize whitespace runs to one space
    .trim();
}
export async function callOllama(prompt, model = "gemma3n:e4b", retries = 5, stepName = "unknown") {
const isUsingOpenWebUI = !!OLLAMA_API_KEY;
// Decide which request/response shape to use from the endpoint URL.
// Open WebUI proxies expose "/api/chat/completions"; any other (or missing)
// URL is treated as a direct Ollama endpoint.
function inferApiType(url) {
  const isOpenWebUI = Boolean(url && url.includes("/api/chat/completions"));
  return isOpenWebUI ? "open-webui" : "direct";
}
// Pause for `ms` milliseconds without blocking the event loop.
async function sleep(ms) {
  await new Promise((wake) => setTimeout(wake, ms));
}
// Core request loop shared by callOllama / callOllamaExplicit: POSTs the
// prompt to OLLAMA_API_URL, retries up to `retries` times, and returns the
// cleanText()-processed response string.
//
// NOTE(review): this region is a rendered diff with OLD and NEW versions of
// several statements interleaved (the +/- markers were stripped) and with
// "@@ ..." hunk headers mixed into the body — as literal JavaScript it does
// not parse. Annotations below mark which lines belong to which side;
// reconstruct from the NEW lines only, against the original repository file.
async function callOllamaBase(prompt, model, retries, stepName, apiType) {
const isUsingOpenWebUI = apiType === "open-webui";
for (let attempt = 1; attempt <= retries; attempt++) {
try {
const promptCharCount = prompt.length;
const promptWordCount = prompt.split(/\s+/).length;
// NOTE(review): next two lines are the OLD single-line logs; the four
// wrapped lines after them are the NEW equivalents.
console.log(`\n[${stepName}] Sending prompt (attempt ${attempt}/${retries})`);
console.log(`Prompt: ${promptCharCount} chars, ~${promptWordCount} words`);
console.log(
`\n[${stepName}] Sending prompt (attempt ${attempt}/${retries})`,
);
console.log(
`Prompt: ${promptCharCount} chars, ~${promptWordCount} words`,
);
const headers = { "Content-Type": "application/json" };
// NOTE(review): OLD unguarded condition; superseded by the NEW line below
// that also requires OLLAMA_API_KEY before attaching the Authorization header.
if (isUsingOpenWebUI) {
if (isUsingOpenWebUI && OLLAMA_API_KEY) {
headers["Authorization"] = `Bearer ${OLLAMA_API_KEY}`;
}
// NOTE(review): OLD multi-line body ternary first; the NEW compact form
// follows it. The NEW direct-Ollama branch sends { model, prompt, stream }
// (the /api/generate request shape) instead of a chat `messages` array.
const body = isUsingOpenWebUI
? {
model,
messages: [{ role: "user", content: prompt }],
}
: {
model,
messages: [{ role: "user", content: prompt }],
stream: false,
};
? { model, messages: [{ role: "user", content: prompt }] }
: { model, prompt, stream: false };
const response = await fetch(OLLAMA_API_URL, {
method: "POST",
// NOTE(review): the next "@@ ..." line is a diff hunk header, not code; the
// `headers,` property of this fetch options object is hidden behind it.
@@ -49,24 +49,24 @@ export async function callOllama(prompt, model = "gemma3n:e4b", retries = 5, ste
body: JSON.stringify(body),
});
// NOTE(review): OLD one-line throw; the NEW wrapped form follows.
if (!response.ok) throw new Error(`Ollama request failed: ${response.status} ${response.statusText}`);
if (!response.ok)
throw new Error(
`Ollama request failed: ${response.status} ${response.statusText}`,
);
const data = await response.json();
const rawText = isUsingOpenWebUI
? data.choices?.[0]?.message?.content
// NOTE(review): OLD read data.message?.content (chat shape); NEW reads
// data.response (the /api/generate response field).
: data.message?.content;
: data.response;
if (!rawText) throw new Error("No response from Ollama");
const cleaned = cleanText(rawText);
// NOTE(review): OLD one-line log plus commented-out debug lines; the NEW
// wrapped log follows.
console.log(`[${stepName}] Received: ${rawText.length} chars, ~${rawText.split(/\s+/).length} words`);
// console.log(`Raw output:\n${rawText}\n`);
// console.log(`Cleaned output:\n${cleaned}\n`);
console.log(
`[${stepName}] Received: ${rawText.length} chars, ~${rawText.split(/\s+/).length} words`,
);
return cleaned;
} catch (err) {
console.warn(`[${stepName}] Attempt ${attempt} failed: ${err.message}`);
if (attempt === retries) throw err;
// NOTE(review): the next "@@ ..." line is a diff hunk header, not code; the
// inter-attempt backoff (old file lines 73-75) is not visible in this
// chunk — confirm against the original file before reconstructing.
@@ -76,3 +76,23 @@ export async function callOllama(prompt, model = "gemma3n:e4b", retries = 5, ste
}
}
}
// Public entry point: forwards to callOllamaBase, inferring the API flavor
// ("open-webui" vs "direct") from the configured endpoint URL.
export async function callOllama(
  prompt,
  model = OLLAMA_MODEL,
  retries = 5,
  stepName = "unknown",
) {
  return callOllamaBase(
    prompt,
    model,
    retries,
    stepName,
    inferApiType(OLLAMA_API_URL),
  );
}
// Variant of callOllama for callers that already know the API flavor;
// defaults to the direct-Ollama request/response shape.
export async function callOllamaExplicit(
  prompt,
  model = OLLAMA_MODEL,
  retries = 5,
  stepName = "unknown",
  apiType = "direct",
) {
  const result = await callOllamaBase(prompt, model, retries, stepName, apiType);
  return result;
}