Use lighter model (gemma3:4b → gemma3n:e4b) for Ollama calls
All checks were successful
ci/woodpecker/push/ci Pipeline was successful

This commit is contained in:
2025-08-30 02:01:15 -04:00
parent 5fdbbf3293
commit 3ce843a417

View File

@@ -5,7 +5,7 @@ async function sleep(ms) {
return new Promise(resolve => setTimeout(resolve, ms));
}
async function callOllama(prompt, model = "gemma3:4b", retries = 6) {
async function callOllama(prompt, model = "gemma3n:e4b", retries = 6) {
for (let attempt = 1; attempt <= retries; attempt++) {
try {
const response = await fetch(OLLAMA_API_URL, {
@@ -112,7 +112,7 @@ ${refined}
for (let attempt = 1; attempt <= maxJsonRetries; attempt++) {
try {
console.log(`📦 JSON pass (attempt ${attempt}/${maxJsonRetries})...`);
jsonText = await callOllama(jsonPrompt, "gemma3:4b", 6);
jsonText = await callOllama(jsonPrompt, "gemma3n:e4b", 6);
const cleaned = jsonText.replace(/```json|```/g, "").trim();
const result = JSON.parse(cleaned);
console.log("🎉 Dungeon generation complete!");