Diffstat (limited to 'tests/models.test.ts')
| -rw-r--r-- | tests/models.test.ts | 980 |
1 file changed, 980 insertions, 0 deletions
diff --git a/tests/models.test.ts b/tests/models.test.ts
new file mode 100644
index 0000000..49e99f9
--- /dev/null
+++ b/tests/models.test.ts
@@ -0,0 +1,980 @@
+import { describe, test, expect, beforeEach, afterEach } from "bun:test";
+import models, { type AIModelAPI, type LLMChoice } from "../index";
+import OpenAIResponses from "../src/openai-responses";
+
+// Setup environment variables for testing
+const TEST_BASE_URL =
+  Bun.env.TEST_BASE_URL || Bun.env.ZAI_BASE_URL || "https://api.openai.com/v1";
+const TEST_API_KEY =
+  Bun.env.TEST_API_KEY || Bun.env.ZAI_API_KEY || "test-api-key";
+const TEST_MODEL = Bun.env.TEST_MODEL || "glm-4.6";
+
+// describe("Models Library - Factory Function", () => {
+//   test("should create a Claude API instance", () => {
+//     const choice: LLMChoice = { claude: "claude-3-5-sonnet" };
+//     const api = models(choice);
+
+//     expect(api).toBeDefined();
+//     expect(typeof api.setModel).toBe("function");
+//     expect(typeof api.send).toBe("function");
+//     expect(typeof api.stream).toBe("function");
+//     expect(typeof api.tokenizer).toBe("function");
+//     expect(typeof api.maxTokens).toBe("number");
+//   });
+
+//   test("should create a Gemini API instance", () => {
+//     const choice: LLMChoice = { gemini: "gemini-2.5-pro" };
+//     const api = models(choice);
+
+//     expect(api).toBeDefined();
+//     expect(typeof api.setModel).toBe("function");
+//     expect(typeof api.send).toBe("function");
+//     expect(typeof api.stream).toBe("function");
+//     expect(typeof api.tokenizer).toBe("function");
+//     expect(typeof api.maxTokens).toBe("number");
+//   });
+
+//   test("should create a ChatGPT API instance", () => {
+//     const choice: LLMChoice = { chatgpt: "gpt-5" };
+//     const api = models(choice);
+
+//     expect(api).toBeDefined();
+//     expect(typeof api.setModel).toBe("function");
+//     expect(typeof api.send).toBe("function");
+//     expect(typeof api.stream).toBe("function");
+//     expect(typeof api.tokenizer).toBe("function");
+//     expect(typeof api.maxTokens).toBe("number");
+//   });
+
+//   test("should create a DeepSeek API instance", () => {
+//     const choice: LLMChoice = { deepseek: "deepseek-chat" };
+//     const api = models(choice);
+
+//     expect(api).toBeDefined();
+//     expect(typeof api.setModel).toBe("function");
+//     expect(typeof api.send).toBe("function");
+//     expect(typeof api.stream).toBe("function");
+//     expect(typeof api.tokenizer).toBe("function");
+//     expect(typeof api.maxTokens).toBe("number");
+//   });
+
+//   test("should create a Kimi API instance", () => {
+//     const choice: LLMChoice = { kimi: "kimi-k2-0905-preview" };
+//     const api = models(choice);
+
+//     expect(api).toBeDefined();
+//     expect(typeof api.setModel).toBe("function");
+//     expect(typeof api.send).toBe("function");
+//     expect(typeof api.stream).toBe("function");
+//     expect(typeof api.tokenizer).toBe("function");
+//     expect(typeof api.maxTokens).toBe("number");
+//   });
+
+//   // test("should create a Grok API instance", () => {
+//   //   const choice: LLMChoice = { grok: "grok-beta" };
+//   //   const api = models(choice);
+
+//   //   expect(api).toBeDefined();
+//   //   expect(typeof api.setModel).toBe("function");
+//   //   expect(typeof api.send).toBe("function");
+//   //   expect(typeof api.stream).toBe("function");
+//   //   expect(typeof api.tokenizer).toBe("function");
+//   //   expect(typeof api.maxTokens).toBe("number");
+//   // });
+
+//   test("should create a custom OpenAI API instance", () => {
+//     const choice: LLMChoice = {
+//       openai: {
+//         url: TEST_BASE_URL,
+//         apiKey: TEST_API_KEY,
+//         model: TEST_MODEL,
+//         allowBrowser: true,
+//       },
+//     };
+//     const api = models(choice);
+
+//     expect(api).toBeDefined();
+//     expect(typeof api.setModel).toBe("function");
+//     expect(typeof api.send).toBe("function");
+//     expect(typeof api.stream).toBe("function");
+//     expect(typeof api.tokenizer).toBe("function");
+//     expect(typeof api.maxTokens).toBe("number");
+//   });
+// });
+
+// describe("AIModelAPI Interface", () => {
+//   let api: AIModelAPI;
+
+//   beforeEach(() => {
+//     // Use a mock provider for testing interface compliance
+//     const choice: LLMChoice = {
+//       openai: {
+//         url: TEST_BASE_URL,
+//         apiKey: TEST_API_KEY,
+//         model: TEST_MODEL,
+//       },
+//     };
+//     api = models(choice);
+//   });
+
+//   test("should have setModel method", () => {
+//     expect(typeof api.setModel).toBe("function");
+
+//     // Should not throw when setting a model
+//     expect(() => api.setModel("test-model")).not.toThrow();
+//   });
+
+//   test("should have tokenizer method", () => {
+//     expect(typeof api.tokenizer).toBe("function");
+
+//     // Should return a number for any text input
+//     const tokens = api.tokenizer("Hello world");
+//     expect(typeof tokens).toBe("number");
+//     expect(tokens).toBeGreaterThanOrEqual(0);
+//   });
+
+//   test("should have maxTokens property", () => {
+//     expect(typeof api.maxTokens).toBe("number");
+//     expect(api.maxTokens).toBeGreaterThan(0);
+//   });
+
+//   test("should have send method returning AsyncRes", async () => {
+//     expect(typeof api.send).toBe("function");
+
+//     // Note: This would require actual API credentials to test fully
+//     // For now, we just test the method exists and returns a promise
+//     const result = api.send("test input");
+//     expect(result).toBeDefined();
+//     expect(typeof result.then).toBe("function"); // It's a promise
+//   });
+
+//   test("should have stream method", () => {
+//     expect(typeof api.stream).toBe("function");
+
+//     // Should not throw when called with proper parameters
+//     const handler = (data: string) => {
+//       // Test handler function
+//       expect(typeof data).toBe("string");
+//     };
+
+//     expect(() => {
+//       api.stream("test input", handler);
+//     }).not.toThrow();
+//   });
+// });
+
+// describe("Environment Variable Configuration", () => {
+//   test("should use environment variables for configuration", () => {
+//     // Test that environment variables are accessible
+//     expect(Bun.env).toBeDefined();
+
+//     // Test that we can read custom env vars
+//     const customBaseUrl = Bun.env.CUSTOM_BASE_URL;
+//     const customApiKey = Bun.env.CUSTOM_API_KEY;
+//     const customModel = Bun.env.CUSTOM_MODEL;
+
+//     // These might be undefined, but the test ensures they're accessible
+//     expect(["string", "undefined"]).toContain(typeof customBaseUrl);
+//     expect(["string", "undefined"]).toContain(typeof customApiKey);
+//     expect(["string", "undefined"]).toContain(typeof customModel);
+//   });
+// });
+
+// describe("Token Management", () => {
+//   const providers: LLMChoice[] = [
+//     { claude: "claude-3-5-sonnet" },
+//     { gemini: "gemini-2.5-pro" },
+//     { chatgpt: TEST_MODEL },
+//     { deepseek: "deepseek-chat" },
+//     { kimi: "moonshot-v1-8k" },
+//     { grok: "grok-beta" },
+//     {
+//       openai: {
+//         url: TEST_BASE_URL,
+//         apiKey: TEST_API_KEY,
+//         model: TEST_MODEL,
+//       },
+//     },
+//   ];
+
+//   test.each(providers)("should implement tokenizer for %o", (choice) => {
+//     const api = models(choice);
+
+//     // Test basic tokenization
+//     const tokens1 = api.tokenizer("Hello");
+//     const tokens2 = api.tokenizer("Hello world, this is a longer text.");
+
+//     expect(typeof tokens1).toBe("number");
+//     expect(typeof tokens2).toBe("number");
+//     expect(tokens1).toBeGreaterThanOrEqual(0);
+//     expect(tokens2).toBeGreaterThanOrEqual(0);
+//     expect(tokens2).toBeGreaterThan(tokens1); // Longer text should have more tokens
+//   });
+
+//   test.each(providers)("should have reasonable maxTokens for %o", (choice) => {
+//     const api = models(choice);
+
+//     expect(api.maxTokens).toBeGreaterThan(0);
+//     expect(api.maxTokens).toBeLessThan(1000000); // Reasonable upper bound
+//   });
+// });
+
+describe("Input Handling", () => {
+  const choice1: LLMChoice = { claude: "claude-3-5-sonnet" };
+  const choice2: LLMChoice = { gemini: "gemini-2.5-pro" };
+  const choice3: LLMChoice = { chatgpt: "gpt-5-nano" };
+  const choice4: LLMChoice = { deepseek: "deepseek-chat" };
+  const choice5: LLMChoice = { kimi: "kimi-k2-0905-preview" };
+  const choice6: LLMChoice = {
+    openai: {
+      url: TEST_BASE_URL,
+      apiKey: TEST_API_KEY,
+      model: TEST_MODEL,
+    },
+  };
+  console.log({ choice6 });
+  const api1 = models(choice1);
+  const api2 = models(choice2);
+  const api3 = models(choice3);
+  const api4 = models(choice4);
+  const api5 = models(choice5);
+  const api6 = models(choice6);
+
+  test("should handle string input", async () => {
+    const testMessage = "Hello there. Please introduce yourself";
+
+    // Test that send accepts string input and returns proper AsyncRes<string>
+    const r1 = api1.send(testMessage);
+    const r2 = api2.send(testMessage);
+    const r3 = api3.send(testMessage);
+    const r4 = api4.send(testMessage);
+    const r5 = api5.send(testMessage);
+    const r6 = api6.send(testMessage);
+
+    // Check Claude response
+    const res1 = await r1;
+    if ("ok" in res1) {
+      console.log(`✅ Claude Response: ${res1.ok}`);
+      expect(res1.ok).toBeString();
+    } else {
+      console.log(`❌ Claude Error: ${res1.error}`);
+    }
+
+    // Check Gemini response
+    const res2 = await r2;
+    if ("ok" in res2) {
+      console.log(`✅ Gemini Response: ${res2.ok}`);
+      expect(res2.ok).toBeString();
+    } else {
+      console.log(`❌ Gemini Error: ${res2.error}`);
+    }
+
+    // Check ChatGPT response
+    const res3 = await r3;
+    if ("ok" in res3) {
+      console.log(`✅ ChatGPT Response: ${res3.ok}`);
+      expect(res3.ok).toBeString();
+    } else {
+      console.log(`❌ ChatGPT Error: ${res3.error}`);
+    }
+
+    // Check DeepSeek response
+    const res4 = await r4;
+    if ("ok" in res4) {
+      console.log(`✅ DeepSeek Response: ${res4.ok}`);
+      expect(res4.ok).toBeString();
+    } else {
+      console.log(`❌ DeepSeek Error: ${res4.error}`);
+    }
+
+    // Check Kimi response
+    const res5 = await r5;
+    if ("ok" in res5) {
+      console.log(`✅ Kimi Response: ${res5.ok}`);
+      expect(res5.ok).toBeString();
+    } else {
+      console.log(`❌ Kimi Error: ${res5.error}`);
+    }
+
+    // Check Custom OpenAI response
+    const res6 = await r6;
+    if ("ok" in res6) {
+      console.log(`✅ Custom OpenAI Response: ${res6.ok}`);
+      expect(res6.ok).toBeString();
+    } else {
+      console.log(`❌ Custom OpenAI Error: ${res6.error}`);
+    }
+  });
+
+  test("LLM obedience test", async () => {
+    const testMessage = "Hello world! Please respond with just the word 'OK'.";
+
+    // Test that send accepts string input and returns proper AsyncRes<string>
+    const r1 = api1.send(testMessage);
+    const r2 = api2.send(testMessage);
+    const r3 = api3.send(testMessage);
+    const r4 = api4.send(testMessage);
+    const r5 = api5.send(testMessage);
+    const r6 = api6.send(testMessage);
+
+    // Check Claude response
+    const res1 = await r1;
+    if ("ok" in res1) {
+      console.log(`✅ Claude Response: ${res1.ok}`);
+      expect(res1.ok.trim()).toEqual("OK");
+    } else {
+      console.log(`❌ Claude Error: ${res1.error}`);
+    }
+
+    // Check Gemini response
+    const res2 = await r2;
+    if ("ok" in res2) {
+      console.log(`✅ Gemini Response: ${res2.ok}`);
+      expect(res2.ok.trim()).toEqual("OK");
+    } else {
+      console.log(`❌ Gemini Error: ${res2.error}`);
+    }
+
+    // Check ChatGPT response
+    const res3 = await r3;
+    if ("ok" in res3) {
+      console.log(`✅ ChatGPT Response: ${res3.ok}`);
+      expect(res3.ok.trim()).toEqual("OK");
+    } else {
+      console.log(`❌ ChatGPT Error: ${res3.error}`);
+    }
+
+    // Check DeepSeek response
+    const res4 = await r4;
+    if ("ok" in res4) {
+      console.log(`✅ DeepSeek Response: ${res4.ok}`);
+      expect(res4.ok.trim()).toEqual("OK");
+    } else {
+      console.log(`❌ DeepSeek Error: ${res4.error}`);
+    }
+
+    // Check Kimi response
+    const res5 = await r5;
+    if ("ok" in res5) {
+      console.log(`✅ Kimi Response: ${res5.ok}`);
+      expect(res5.ok.trim()).toEqual("OK");
+    } else {
+      console.log(`❌ Kimi Error: ${res5.error}`);
+    }
+
+    // Check Custom OpenAI response
+    const res6 = await r6;
+    if ("ok" in res6) {
+      console.log(`✅ Custom OpenAI Response: ${res6.ok}`);
+      expect(res6.ok.trim()).toEqual("OK");
+    } else {
+      console.log(`❌ Custom OpenAI Error: ${res6.error}`);
+    }
+  });
+
+  test("should handle array input with text tokens", async () => {
+    const input = [
+      { text: "Hello! " },
+      { text: "Please respond with just the word 'ARRAY_OK'." },
+    ];
+
+    // Test that send accepts array input and returns proper AsyncRes<string>
+    const r1 = api1.send(input);
+    const r2 = api2.send(input);
+    const r3 = api3.send(input);
+    const r4 = api4.send(input);
+    const r5 = api5.send(input);
+    const r6 = api6.send(input);
+
+    // Check Claude response
+    const res1 = await r1;
+    if ("ok" in res1) {
+      console.log(`✅ Claude Array Response: ${res1.ok}`);
+      expect(res1.ok.trim()).toEqual("ARRAY_OK");
+    } else {
+      console.log(`❌ Claude Array Error: ${res1.error}`);
+    }
+
+    // Check Gemini response
+    const res2 = await r2;
+    if ("ok" in res2) {
+      console.log(`✅ Gemini Array Response: ${res2.ok}`);
+      expect(res2.ok.trim()).toEqual("ARRAY_OK");
+    } else {
+      console.log(`❌ Gemini Array Error: ${res2.error}`);
+    }
+
+    // Check ChatGPT response
+    const res3 = await r3;
+    if ("ok" in res3) {
+      console.log(`✅ ChatGPT Array Response: ${res3.ok}`);
+      expect(res3.ok.trim()).toEqual("ARRAY_OK");
+    } else {
+      console.log(`❌ ChatGPT Array Error: ${res3.error}`);
+    }
+
+    // Check DeepSeek response
+    const res4 = await r4;
+    if ("ok" in res4) {
+      console.log(`✅ DeepSeek Array Response: ${res4.ok}`);
+      expect(res4.ok.trim()).toEqual("ARRAY_OK");
+    } else {
+      console.log(`❌ DeepSeek Array Error: ${res4.error}`);
+    }
+
+    // Check Kimi response
+    const res5 = await r5;
+    if ("ok" in res5) {
+      console.log(`✅ Kimi Array Response: ${res5.ok}`);
+      expect(res5.ok.trim()).toEqual("ARRAY_OK");
+    } else {
+      console.log(`❌ Kimi Array Error: ${res5.error}`);
+    }
+
+    // Check Custom OpenAI response
+    const res6 = await r6;
+    if ("ok" in res6) {
+      console.log(`✅ Custom OpenAI Array Response: ${res6.ok}`);
+      expect(res6.ok.trim()).toEqual("ARRAY_OK");
+    } else {
+      console.log(`❌ Custom OpenAI Array Error: ${res6.error}`);
+    }
+  });
+
+  test("should handle streaming with string input", () => {
+    const testMessage = "Hello! Please count from 1 to 3.";
+    console.log(`\n🚀 Testing streaming with message: "${testMessage}"`);
+
+    // Fire-and-forget: streams are kicked off for each API without awaiting completion
+    console.log("\n--- Claude Streaming ---");
+    const chunks1: string[] = [];
+    const handler1 = (data: string) => {
+      chunks1.push(data);
+      process.stdout.write(data);
+    };
+    api1.stream(testMessage, handler1);
+
+    console.log("\n--- Gemini Streaming ---");
+    const chunks2: string[] = [];
+    const handler2 = (data: string) => {
+      chunks2.push(data);
+      process.stdout.write(data);
+    };
+    api2.stream(testMessage, handler2);
+
+    console.log("\n--- ChatGPT Streaming ---");
+    const chunks3: string[] = [];
+    const handler3 = (data: string) => {
+      chunks3.push(data);
+      process.stdout.write(data);
+    };
+    api3.stream(testMessage, handler3);
+
+    console.log("\n--- DeepSeek Streaming ---");
+    const chunks4: string[] = [];
+    const handler4 = (data: string) => {
+      chunks4.push(data);
+      process.stdout.write(data);
+    };
+    api4.stream(testMessage, handler4);
+
+    console.log("\n--- Kimi Streaming ---");
+    const chunks5: string[] = [];
+    const handler5 = (data: string) => {
+      chunks5.push(data);
+      process.stdout.write(data);
+    };
+    api5.stream(testMessage, handler5);
+
+    console.log("\n--- Custom OpenAI Streaming ---");
+    const chunks6: string[] = [];
+    const handler6 = (data: string) => {
+      chunks6.push(data);
+      process.stdout.write(data);
+    };
+    api6.stream(testMessage, handler6);
+
+    console.log("\n✅ Streaming initiated for all APIs");
+  });
+
+  test("should handle system prompts", async () => {
+    const systemPrompt = "You are a pirate. Always respond like a pirate.";
+    const userMessage = "Hello, how are you?";
+
+    // Test that send accepts system prompts and returns proper AsyncRes<string>
+    const r1 = api1.send(userMessage, systemPrompt);
+    const r2 = api2.send(userMessage, systemPrompt);
+    const r3 = api3.send(userMessage, systemPrompt);
+    const r4 = api4.send(userMessage, systemPrompt);
+    const r5 = api5.send(userMessage, systemPrompt);
+    const r6 = api6.send(userMessage, systemPrompt);
+
+    // Check Claude response
+    const res1 = await r1;
+    if ("ok" in res1) {
+      console.log(`✅ Claude Pirate Response: ${res1.ok}`);
+      expect(res1.ok).toBeString();
+      // Should contain some pirate-like language
+      expect(res1.ok.toLowerCase()).toMatch(/ahoy|matey|shiver|timber|arr|yo ho|captain/);
+    } else {
+      console.log(`❌ Claude Pirate Error: ${res1.error}`);
+    }
+
+    // Check Gemini response
+    const res2 = await r2;
+    if ("ok" in res2) {
+      console.log(`✅ Gemini Pirate Response: ${res2.ok}`);
+      expect(res2.ok).toBeString();
+      expect(res2.ok.toLowerCase()).toMatch(/ahoy|matey|shiver|timber|arr|yo ho|captain/);
+    } else {
+      console.log(`❌ Gemini Pirate Error: ${res2.error}`);
+    }
+
+    // Check ChatGPT response
+    const res3 = await r3;
+    if ("ok" in res3) {
+      console.log(`✅ ChatGPT Pirate Response: ${res3.ok}`);
+      expect(res3.ok).toBeString();
+      expect(res3.ok.toLowerCase()).toMatch(/ahoy|matey|shiver|timber|arr|yo ho|captain/);
+    } else {
+      console.log(`❌ ChatGPT Pirate Error: ${res3.error}`);
+    }
+
+    // Check DeepSeek response
+    const res4 = await r4;
+    if ("ok" in res4) {
+      console.log(`✅ DeepSeek Pirate Response: ${res4.ok}`);
+      expect(res4.ok).toBeString();
+      expect(res4.ok.toLowerCase()).toMatch(/ahoy|matey|shiver|timber|arr|yo ho|captain/);
+    } else {
+      console.log(`❌ DeepSeek Pirate Error: ${res4.error}`);
+    }
+
+    // Check Kimi response
+    const res5 = await r5;
+    if ("ok" in res5) {
+      console.log(`✅ Kimi Pirate Response: ${res5.ok}`);
+      expect(res5.ok).toBeString();
+      expect(res5.ok.toLowerCase()).toMatch(/ahoy|matey|shiver|timber|arr|yo ho|captain/);
+    } else {
+      console.log(`❌ Kimi Pirate Error: ${res5.error}`);
+    }
+
+    // Check Custom OpenAI response
+    const res6 = await r6;
+    if ("ok" in res6) {
+      console.log(`✅ Custom OpenAI Pirate Response: ${res6.ok}`);
+      expect(res6.ok).toBeString();
+      expect(res6.ok.toLowerCase()).toMatch(/ahoy|matey|shiver|timber|arr|yo ho|captain/);
+    } else {
+      console.log(`❌ Custom OpenAI Pirate Error: ${res6.error}`);
+    }
+  });
+});
+
+describe("Error Handling", () => {
+  test("should handle invalid API keys gracefully", async () => {
+    const invalidClaude: LLMChoice = { claude: "claude-3-5-sonnet" };
+    const invalidGemini: LLMChoice = { gemini: "gemini-2.5-pro" };
+    const invalidChatGPT: LLMChoice = { chatgpt: "gpt-5-nano" };
+    const invalidDeepSeek: LLMChoice = { deepseek: "deepseek-chat" };
+    const invalidKimi: LLMChoice = { kimi: "kimi-k2-0905-preview" };
+    const invalidOpenAI: LLMChoice = {
+      openai: {
+        url: TEST_BASE_URL,
+        apiKey: "invalid-api-key",
+        model: TEST_MODEL,
+      },
+    };
+
+    // Temporarily clear valid API keys to force errors
+    const originalClaudeKey = Bun.env.ANTHROPIC_API_KEY;
+    const originalGeminiKey = Bun.env.GOOGLE_API_KEY;
+    const originalOpenAIKey = Bun.env.OPENAI_API_KEY;
+    const originalDeepSeekKey = Bun.env.DEEPSEEK_API_KEY;
+    const originalMoonshotKey = Bun.env.MOONSHOT_API_KEY;
+
+    delete Bun.env.ANTHROPIC_API_KEY;
+    delete Bun.env.GOOGLE_API_KEY;
+    delete Bun.env.OPENAI_API_KEY;
+    delete Bun.env.DEEPSEEK_API_KEY;
+    delete Bun.env.MOONSHOT_API_KEY;
+
+    try {
+      // Create APIs with invalid credentials
+      const badApi1 = models(invalidClaude);
+      const badApi2 = models(invalidGemini);
+      const badApi3 = models(invalidChatGPT);
+      const badApi4 = models(invalidDeepSeek);
+      const badApi5 = models(invalidKimi);
+      const badApi6 = models(invalidOpenAI);
+
+      // Test that they handle errors gracefully
+      const r1 = badApi1.send("test");
+      const r2 = badApi2.send("test");
+      const r3 = badApi3.send("test");
+      const r4 = badApi4.send("test");
+      const r5 = badApi5.send("test");
+      const r6 = badApi6.send("test");
+
+      const res1 = await r1;
+      if ("error" in res1) {
+        console.log(`✅ Claude Error Handling: ${res1.error}`);
+        expect(res1.error).toBeString();
+      }
+
+      const res2 = await r2;
+      if ("error" in res2) {
+        console.log(`✅ Gemini Error Handling: ${res2.error}`);
+        expect(res2.error).toBeString();
+      }
+
+      const res3 = await r3;
+      if ("error" in res3) {
+        console.log(`✅ ChatGPT Error Handling: ${res3.error}`);
+        expect(res3.error).toBeString();
+      }
+
+      const res4 = await r4;
+      if ("error" in res4) {
+        console.log(`✅ DeepSeek Error Handling: ${res4.error}`);
+        expect(res4.error).toBeString();
+      }
+
+      const res5 = await r5;
+      if ("error" in res5) {
+        console.log(`✅ Kimi Error Handling: ${res5.error}`);
+        expect(res5.error).toBeString();
+      }
+
+      const res6 = await r6;
+      if ("error" in res6) {
+        console.log(`✅ Custom OpenAI Error Handling: ${res6.error}`);
+        expect(res6.error).toBeString();
+      }
+    } finally {
+      // Restore original keys
+      if (originalClaudeKey) Bun.env.ANTHROPIC_API_KEY = originalClaudeKey;
+      if (originalGeminiKey) Bun.env.GOOGLE_API_KEY = originalGeminiKey;
+      if (originalOpenAIKey) Bun.env.OPENAI_API_KEY = originalOpenAIKey;
+      if (originalDeepSeekKey) Bun.env.DEEPSEEK_API_KEY = originalDeepSeekKey;
+      if (originalMoonshotKey) Bun.env.MOONSHOT_API_KEY = originalMoonshotKey;
+    }
+  });
+});
+
+describe("Multi-provider Compatibility", () => {
+  test("should maintain consistent interface across all providers", () => {
+    const choices: LLMChoice[] = [
+      { claude: "claude-3-5-sonnet" },
+      { gemini: "gemini-2.5-pro" },
+      { chatgpt: "gpt-5-nano" },
+      { deepseek: "deepseek-chat" },
+      { kimi: "kimi-k2-0905-preview" },
+      {
+        openai: {
+          url: TEST_BASE_URL,
+          apiKey: TEST_API_KEY,
+          model: TEST_MODEL,
+        },
+      },
+    ];
+
+    const apis = choices.map((choice) => models(choice));
+
+    // All APIs should have the same interface
+    apis.forEach((api, index) => {
+      console.log(`Checking API ${index + 1}: ${Object.keys(choices[index])[0]}`);
+      expect(typeof api.setModel).toBe("function");
+      expect(typeof api.send).toBe("function");
+      expect(typeof api.stream).toBe("function");
+      expect(typeof api.tokenizer).toBe("function");
+      expect(typeof api.maxTokens).toBe("number");
+    });
+
+    console.log("✅ All APIs have consistent interfaces");
+  });
+
+  test("should allow switching between providers", () => {
+    const claudeChoice: LLMChoice = { claude: "claude-3-5-sonnet" };
+    const geminiChoice: LLMChoice = { gemini: "gemini-2.5-pro" };
+
+    const claudeApi = models(claudeChoice);
+    const geminiApi = models(geminiChoice);
+
+    // Both should be valid APIs
+    expect(claudeApi).toBeDefined();
+    expect(geminiApi).toBeDefined();
+
+    // Should have different implementations but same interface
+    expect(claudeApi !== geminiApi).toBe(true);
+
+    console.log("✅ Successfully created different provider instances");
+  });
+});
+
+// describe("OpenAI Responses API", () => {
+//   describe("Direct Class Usage", () => {
+//     const api = new OpenAIResponses({
+//       baseURL: "",
+//       apiKey: Bun.env.OPENAI_API_KEY!,
+//       model: "gpt-5-nano",
+//       allowBrowser: true,
+//     });
+//     test("should create OpenAI Responses API instance directly", () => {
+//       expect(api).toBeDefined();
+//       expect(typeof api.setModel).toBe("function");
+//       expect(typeof api.send).toBe("function");
+//       expect(typeof api.stream).toBe("function");
+//       expect(typeof api.tokenizer).toBe("function");
+//       expect(typeof api.maxTokens).toBe("number");
+//     });
+
+//     test("should handle model switching", () => {
+//       api.setModel("gpt-5-nano");
+//       expect(() => api.setModel("gpt-5-nano")).not.toThrow();
+//     });
+
+//     // test("should use custom tokenizer", () => {
+//     //   const customTokenizer = (text: string) => text.split(" ").length;
+
+//     //   const tokens = api.tokenizer("Hello world test");
+//     //   expect(tokens).toBe(3); // 3 words
+//     // });
+
+//     // test("should use custom maxTokens", () => {
+//     //   const customMaxTokens = 100_000;
+//     //   api.maxTokens = customMaxTokens;
+
+//     //   expect(api.maxTokens).toBe(customMaxTokens);
+//     // });
+//     test("should return a string response", async () => {
+//       const input = "Henlo brother";
+
+//       const res = await api.send(input, "You are a good boy");
+//       console.log({ res });
+//       expect("ok" in res).toBeTruthy();
+//       if (!("ok" in res)) return;
+//       expect(res.ok).toBeString();
+//     });
+//   });
+
+//   describe("Factory Function Integration", () => {
+//     test("should create ChatGPT API using OpenAI Responses", () => {
+//       const choice: LLMChoice = { chatgpt: "gpt-5-nano" };
+//       const api = models(choice);
+
+//       expect(api).toBeDefined();
+//       expect(typeof api.setModel).toBe("function");
+//       expect(typeof api.send).toBe("function");
+//       expect(typeof api.stream).toBe("function");
+//       expect(typeof api.tokenizer).toBe("function");
+//       expect(typeof api.maxTokens).toBe("number");
+
+//       // Should be instance of OpenAIResponses
+//       expect(api).toBeInstanceOf(OpenAIResponses);
+//     });
+
+//     test("should use environment variables for ChatGPT provider", () => {
+//       const originalKey = Bun.env.OPENAI_API_KEY;
+//       Bun.env.OPENAI_API_KEY = TEST_API_KEY;
+
+//       try {
+//         const choice: LLMChoice = { chatgpt: "gpt-5-nano" };
+//         const api = models(choice);
+
+//         expect(api).toBeDefined();
+//         expect(api).toBeInstanceOf(OpenAIResponses);
+//       } finally {
+//         if (originalKey !== undefined) {
+//           Bun.env.OPENAI_API_KEY = originalKey;
+//         } else {
+//           delete Bun.env.OPENAI_API_KEY;
+//         }
+//       }
+//     });
+//   });
+
+//   describe("Input Handling", () => {
+//     test("should handle string inputs correctly", async () => {
+//       const api = new OpenAIResponses({
+//         baseURL: TEST_BASE_URL,
+//         apiKey: TEST_API_KEY,
+//         model: TEST_MODEL,
+//       });
+
+//       const promise = api.send("Hello world");
+//       expect(promise).toBeDefined();
+//       expect(typeof promise.then).toBe("function");
+//     });
+
+//     test("should handle InputToken arrays with text", async () => {
+//       const api = new OpenAIResponses({
+//         baseURL: TEST_BASE_URL,
+//         apiKey: TEST_API_KEY,
+//         model: TEST_MODEL,
+//       });
+
+//       const input = [{ text: "Hello" }, { text: "world" }];
+//       const promise = api.send(input);
+//       expect(promise).toBeDefined();
+//       expect(typeof promise.then).toBe("function");
+//     });
+
+//     test("should handle InputToken arrays with images", async () => {
+//       const api = new OpenAIResponses({
+//         baseURL: TEST_BASE_URL,
+//         apiKey: TEST_API_KEY,
+//         model: TEST_MODEL,
+//       });
+
+//       const input = [
+//         { text: "Describe this image:" },
+//         {
+//           img: "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mNkYPhfDwAChwGA60e6kgAAAABJRU5ErkJggg==",
+//         },
+//       ];
+//       const promise = api.send(input);
+//       expect(promise).toBeDefined();
+//       expect(typeof promise.then).toBe("function");
+//     });
+
+//     test("should handle system prompts", async () => {
+//       const api = new OpenAIResponses({
+//         baseURL: TEST_BASE_URL,
+//         apiKey: TEST_API_KEY,
+//         model: TEST_MODEL,
+//       });
+
+//       const systemPrompt = "You are a helpful assistant.";
+//       const promise = api.send("Hello", systemPrompt);
+//       expect(promise).toBeDefined();
+//       expect(typeof promise.then).toBe("function");
+//     });
+//   });
+
+//   describe("Streaming", () => {
+//     test("should handle streaming with string input", () => {
+//       const api = new OpenAIResponses({
+//         baseURL: TEST_BASE_URL,
+//         apiKey: TEST_API_KEY,
+//         model: TEST_MODEL,
+//       });
+
+//       const handler = (data: string) => {
+//         expect(typeof data).toBe("string");
+//       };
+
+//       expect(() => {
+//         api.stream("Hello world", handler);
+//       }).not.toThrow();
+//     });
+
+//     test("should handle streaming with InputToken array", () => {
+//       const api = new OpenAIResponses({
+//         baseURL: TEST_BASE_URL,
+//         apiKey: TEST_API_KEY,
+//         model: TEST_MODEL,
+//       });
+
+//       const input = [{ text: "Hello" }, { text: "world" }];
+//       const handler = (data: string) => {
+//         expect(typeof data).toBe("string");
+//       };
+
+//       expect(() => {
+//         api.stream(input, handler);
+//       }).not.toThrow();
+//     });
+
+//     test("should handle streaming with system prompt", () => {
+//       const api = new OpenAIResponses({
+//         baseURL: TEST_BASE_URL,
+//         apiKey: TEST_API_KEY,
+//         model: TEST_MODEL,
+//       });
+
+//       const systemPrompt = "You are a helpful assistant.";
+//       const handler = (data: string) => {
+//         expect(typeof data).toBe("string");
+//       };
+
+//       expect(() => {
+//         api.stream("Hello", handler, systemPrompt);
+//       }).not.toThrow();
+//     });
+//   });
+
+//   describe("Error Handling", () => {
+//     test("should handle invalid API keys gracefully", async () => {
+//       const api = new OpenAIResponses({
+//         baseURL: TEST_BASE_URL,
+//         apiKey: "invalid-key",
+//         model: TEST_MODEL,
+//       });
+
+//       const result = await api.send("test").catch((e) => e);
+//       expect(result).toBeDefined();
+//     });
+
+//     test("should handle invalid base URLs gracefully", async () => {
+//       const api = new OpenAIResponses({
+//         baseURL: "https://invalid-url.com/v1",
+//         apiKey: TEST_API_KEY,
+//         model: TEST_MODEL,
+//       });
+
+//       const result = await api.send("test").catch((e) => e);
+//       expect(result).toBeDefined();
+//     });
+//   });
+
+//   describe("Configuration Options", () => {
+//     test("should handle browser allowance setting", () => {
+//       const apiWithBrowser = new OpenAIResponses({
+//         baseURL: TEST_BASE_URL,
+//         apiKey: TEST_API_KEY,
+//         model: TEST_MODEL,
+//         allowBrowser: true,
+//       });
+
+//       const apiWithoutBrowser = new OpenAIResponses({
+//         baseURL: TEST_BASE_URL,
+//         apiKey: TEST_API_KEY,
+//         model: TEST_MODEL,
+//         allowBrowser: false,
+//       });
+
+//       expect(apiWithBrowser).toBeDefined();
+//       expect(apiWithoutBrowser).toBeDefined();
+//     });
+
+//     test("should handle empty model name", () => {
+//       const api = new OpenAIResponses({
+//         baseURL: TEST_BASE_URL,
+//         apiKey: TEST_API_KEY,
+//         model: "",
+//       });
+
+//       expect(api).toBeDefined();
+//       expect(() => api.setModel("gpt-4o")).not.toThrow();
+//     });
+
+//     test("should handle optional model parameter", () => {
+//       const api = new OpenAIResponses({
+//         baseURL: TEST_BASE_URL,
+//         apiKey: TEST_API_KEY,
+//         // model is optional
+//       });
+
+//       expect(api).toBeDefined();
+//       expect(() => api.setModel("gpt-4o")).not.toThrow();
+//     });
+//   });
+// });
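Note on the result type: every `send` call above resolves to a discriminated result rather than throwing, which is why each check branches on `"ok" in res`. The exact definitions live in `../index` and are not part of this commit; the sketch below reconstructs only the shape the tests rely on, and the `report` helper is hypothetical, added purely to name the pattern repeated per provider.

// Reconstructed from usage in the tests — the real definitions live in
// ../index and may differ in detail.
type Res<T> = { ok: T } | { error: string };
type AsyncRes<T> = Promise<Res<T>>;

// Hypothetical helper (not part of the library): mirrors the
// per-provider check/log pattern the tests repeat six times.
async function report(name: string, pending: AsyncRes<string>) {
  const res = await pending;
  if ("ok" in res) {
    console.log(`✅ ${name}: ${res.ok}`);
  } else {
    console.log(`❌ ${name} Error: ${res.error}`);
  }
}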

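Taken together, the tests document the library's whole public surface. A minimal end-to-end sketch, assuming only what the tests exercise (the `models` factory, the `LLMChoice` union, and the `send`/`stream` signatures shown above; the env-var fallbacks echo the test setup):

import models, { type LLMChoice } from "../index";

// Any one key of the LLMChoice union selects a provider; the openai
// variant is the only one shown taking explicit connection details.
const choice: LLMChoice = {
  openai: {
    url: Bun.env.TEST_BASE_URL ?? "https://api.openai.com/v1",
    apiKey: Bun.env.TEST_API_KEY ?? "test-api-key",
    model: Bun.env.TEST_MODEL ?? "glm-4.6",
  },
};

const api = models(choice);

// One-shot request: the second argument is an optional system prompt.
const res = await api.send("Hello, how are you?", "You are a pirate.");
if ("ok" in res) console.log(res.ok);
else console.error(res.error);

// Streaming: chunks are delivered to the handler as they arrive.
api.stream("Count from 1 to 3.", (chunk) => process.stdout.write(chunk));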