// Unit tests for src/ollamaClient.js (vitest).
// Test framework primitives plus the mocking utilities (vi) used to stub
// globalThis.fetch throughout these suites.
import { describe, it, expect, vi, beforeEach, afterEach } from "vitest";

// Units under test: text post-processing, API-type inference, the two
// request helpers, model auto-discovery, and the discovered-model constant.
import {
  cleanText,
  inferApiType,
  callOllama,
  callOllamaExplicit,
  initializeModel,
  OLLAMA_MODEL,
} from "../../src/ollamaClient.js";
describe("cleanText", () => {
  // Assert that cleanText maps a raw markdown-ish input to the expected
  // plain-text output. Shared by every case below.
  const check = (input, expected) => {
    expect(cleanText(input)).toBe(expected);
  };

  it("strips markdown headers", () => {
    check("# Title", "Title");
    check("## Sub", "Sub");
  });

  it("replaces bold with plain text", () => {
    check("**bold**", "bold");
  });

  it("removes asterisks and underscores", () => {
    check("*a* _b_", "a b");
  });

  it("collapses whitespace to single spaces and trims", () => {
    check(" a b \n c ", "a b c");
  });
});
describe("inferApiType", () => {
  // Assert that a given API URL is classified as the expected backend type.
  const expectType = (url, type) => {
    expect(inferApiType(url)).toBe(type);
  };

  it("returns ollama-generate for null/undefined/empty string", () => {
    for (const missing of [null, undefined, ""]) {
      expectType(missing, "ollama-generate");
    }
  });

  it("returns open-webui for URL with /api/chat/completions", () => {
    expectType("http://host/api/chat/completions", "open-webui");
  });

  it("returns ollama-chat for URL with /api/chat", () => {
    expectType("http://host/api/chat", "ollama-chat");
  });

  it("returns ollama-generate for plain base URL", () => {
    expectType("http://localhost:11434", "ollama-generate");
  });
});
describe("callOllama (mocked fetch)", () => {
  const originalFetch = globalThis.fetch;
  const originalEnv = process.env.OLLAMA_API_URL;

  // Restore an env var to its pre-suite value. Assigning `undefined` to
  // process.env stores the literal string "undefined", so when the var was
  // originally unset it must be deleted instead of reassigned.
  const restoreEnv = (key, value) => {
    if (value === undefined) {
      delete process.env[key];
    } else {
      process.env[key] = value;
    }
  };

  beforeEach(() => {
    process.env.OLLAMA_API_URL = "http://localhost:11434";
    globalThis.fetch = vi.fn();
  });

  afterEach(() => {
    restoreEnv("OLLAMA_API_URL", originalEnv);
    globalThis.fetch = originalFetch;
  });

  it("returns cleaned text from ollama-generate response", async () => {
    // `response` is the ollama-generate payload field; markdown is stripped.
    vi.mocked(globalThis.fetch).mockResolvedValueOnce({
      ok: true,
      json: () => Promise.resolve({ response: "**Hello** world" }),
    });
    const result = await callOllama("Hi", undefined, 1, "test");
    expect(result).toBe("Hello world");
  });

  it("throws on non-ok response", async () => {
    vi.mocked(globalThis.fetch).mockResolvedValueOnce({
      ok: false,
      status: 500,
      statusText: "Error",
      text: () => Promise.resolve("server error"),
    });
    await expect(callOllama("Hi", undefined, 1, "test")).rejects.toThrow("Ollama request failed");
  });

  it("throws on non-ok response when response.text() rejects", async () => {
    // Even when the error body itself cannot be read, the wrapper error
    // must still surface to the caller.
    vi.mocked(globalThis.fetch).mockResolvedValueOnce({
      ok: false,
      status: 502,
      statusText: "Bad Gateway",
      text: () => Promise.reject(new Error("body read error")),
    });
    await expect(callOllama("Hi", undefined, 1, "test")).rejects.toThrow("Ollama request failed");
  });

  it("retries on failure then succeeds", async () => {
    // First attempt fails at the network level; second returns a good payload.
    vi.mocked(globalThis.fetch)
      .mockRejectedValueOnce(new Error("network error"))
      .mockResolvedValueOnce({
        ok: true,
        json: () => Promise.resolve({ response: "Retry ok" }),
      });
    const result = await callOllama("Hi", undefined, 2, "test");
    expect(result).toBe("Retry ok");
    expect(globalThis.fetch).toHaveBeenCalledTimes(2);
  });
});
describe("callOllamaExplicit (mocked fetch)", () => {
  const originalFetch = globalThis.fetch;
  const originalUrl = process.env.OLLAMA_API_URL;
  const originalKey = process.env.OLLAMA_API_KEY;
  // Captured as well: the Authorization-header test overwrites OLLAMA_MODEL,
  // and the original afterEach never restored it (env leak across suites).
  const originalModel = process.env.OLLAMA_MODEL;

  // Restore an env var to its pre-suite value. Assigning `undefined` to
  // process.env stores the literal string "undefined", so when the var was
  // originally unset it must be deleted instead of reassigned.
  const restoreEnv = (key, value) => {
    if (value === undefined) {
      delete process.env[key];
    } else {
      process.env[key] = value;
    }
  };

  beforeEach(() => {
    globalThis.fetch = vi.fn();
  });

  afterEach(() => {
    restoreEnv("OLLAMA_API_URL", originalUrl);
    restoreEnv("OLLAMA_API_KEY", originalKey);
    restoreEnv("OLLAMA_MODEL", originalModel);
    globalThis.fetch = originalFetch;
  });

  it("returns content from open-webui response shape", async () => {
    process.env.OLLAMA_API_URL = "http://host/api/chat/completions";
    // open-webui uses the OpenAI-style choices[].message.content shape.
    vi.mocked(globalThis.fetch).mockResolvedValueOnce({
      ok: true,
      json: () =>
        Promise.resolve({
          choices: [{ message: { content: "**Open** answer" } }],
        }),
    });
    const result = await callOllamaExplicit(
      "Hi",
      "model",
      1,
      "test",
      "open-webui"
    );
    expect(result).toBe("Open answer");
  });

  it("sends Authorization header when open-webui and OLLAMA_API_KEY set", async () => {
    process.env.OLLAMA_API_URL = "http://host/api/chat/completions";
    process.env.OLLAMA_API_KEY = "secret-key";
    process.env.OLLAMA_MODEL = "";
    vi.mocked(globalThis.fetch).mockResolvedValueOnce({
      ok: true,
      json: () =>
        Promise.resolve({
          choices: [{ message: { content: "ok" } }],
        }),
    });
    await callOllamaExplicit("Hi", "model", 1, "test", "open-webui");
    // Inspect the options object passed to the mocked fetch.
    const [, opts] = vi.mocked(globalThis.fetch).mock.calls[0];
    expect(opts?.headers?.Authorization).toBe("Bearer secret-key");
  });

  it("returns content from ollama-chat response shape", async () => {
    // Native ollama chat responses carry message.content at the top level.
    vi.mocked(globalThis.fetch).mockResolvedValueOnce({
      ok: true,
      json: () =>
        Promise.resolve({ message: { content: "Chat **reply**" } }),
    });
    const result = await callOllamaExplicit(
      "Hi",
      "model",
      1,
      "test",
      "ollama-chat"
    );
    expect(result).toBe("Chat reply");
  });

  it("throws when response has no content", async () => {
    vi.mocked(globalThis.fetch).mockResolvedValueOnce({
      ok: true,
      json: () => Promise.resolve({}),
    });
    await expect(
      callOllamaExplicit("Hi", "model", 1, "test", "ollama-generate")
    ).rejects.toThrow("No response from Ollama");
  });
});
describe("initializeModel (mocked fetch)", () => {
  const originalFetch = globalThis.fetch;
  const originalEnv = process.env.OLLAMA_API_URL;
  const originalOllamaModel = process.env.OLLAMA_MODEL;

  // Restore an env var to its pre-suite value. Assigning `undefined` to
  // process.env stores the literal string "undefined", so when the var was
  // originally unset it must be deleted instead of reassigned.
  const restoreEnv = (key, value) => {
    if (value === undefined) {
      delete process.env[key];
    } else {
      process.env[key] = value;
    }
  };

  beforeEach(() => {
    process.env.OLLAMA_API_URL = "http://localhost:11434";
    process.env.OLLAMA_MODEL = "";
    globalThis.fetch = vi.fn();
  });

  afterEach(() => {
    restoreEnv("OLLAMA_API_URL", originalEnv);
    restoreEnv("OLLAMA_MODEL", originalOllamaModel);
    globalThis.fetch = originalFetch;
  });

  it("does not fetch when OLLAMA_MODEL is set", async () => {
    process.env.OLLAMA_MODEL = "existing-model";
    await initializeModel();
    expect(globalThis.fetch).not.toHaveBeenCalled();
  });

  it("leaves OLLAMA_MODEL unchanged when fetch returns not ok", async () => {
    process.env.OLLAMA_MODEL = "";
    const before = OLLAMA_MODEL;
    vi.mocked(globalThis.fetch).mockResolvedValueOnce({
      ok: false,
      status: 404,
      json: () => Promise.resolve({}),
    });
    await initializeModel();
    expect(OLLAMA_MODEL).toBe(before);
  });

  it("fetches /api/tags when OLLAMA_MODEL not set", async () => {
    process.env.OLLAMA_MODEL = "";
    process.env.OLLAMA_API_URL = "http://localhost:11434";
    vi.mocked(globalThis.fetch).mockResolvedValueOnce({
      ok: true,
      json: () => Promise.resolve({ models: [{ name: "test-model" }] }),
    });
    await initializeModel();
    expect(globalThis.fetch).toHaveBeenCalled();
    // Plain ollama model discovery goes through GET /api/tags.
    const [url, opts] = vi.mocked(globalThis.fetch).mock.calls[0];
    expect(String(url)).toMatch(/\/api\/tags$/);
    expect(opts?.method || "GET").toBe("GET");
  });

  it("fetches /api/v1/models when URL has open-webui path and sets model from data.data id", async () => {
    process.env.OLLAMA_MODEL = "";
    process.env.OLLAMA_API_URL = "http://host/api/chat/completions";
    vi.mocked(globalThis.fetch).mockResolvedValueOnce({
      ok: true,
      json: () => Promise.resolve({ data: [{ id: "webui-model" }] }),
    });
    await initializeModel();
    const [url] = vi.mocked(globalThis.fetch).mock.calls[0];
    expect(String(url)).toMatch(/\/api\/v1\/models$/);
    expect(OLLAMA_MODEL).toBe("webui-model");
  });

  it("sets model from data.data[0].name when id missing", async () => {
    process.env.OLLAMA_MODEL = "";
    process.env.OLLAMA_API_URL = "http://host/api/chat/completions";
    vi.mocked(globalThis.fetch).mockResolvedValueOnce({
      ok: true,
      json: () => Promise.resolve({ data: [{ name: "webui-model-name" }] }),
    });
    await initializeModel();
    expect(OLLAMA_MODEL).toBe("webui-model-name");
  });

  it("catches fetch failure and warns", async () => {
    vi.mocked(globalThis.fetch).mockRejectedValueOnce(new Error("network"));
    // Silence the warning while still capturing it for the assertion.
    const warn = vi.spyOn(console, "warn").mockImplementation(() => {});
    await initializeModel();
    expect(warn).toHaveBeenCalledWith(expect.stringContaining("Could not fetch default model"));
    warn.mockRestore();
  });
});