author    polwex <polwex@sortug.com>    2025-10-19 12:54:25 +0700
committer polwex <polwex@sortug.com>    2025-10-19 12:54:25 +0700
commit    8815d3c1d40550470c5bc972bc16bd4966735154 (patch)
tree      92ef606b568035b9e88d89286be3330f4b84af1e /tests
parent    ba16ebcbe36c1a1cbdb1d1379cb3f9c3a086acdf (diff)
new openai responses api and some claude made tests (HEAD, master)
Diffstat (limited to 'tests')
-rw-r--r--   tests/example.ts           279
-rw-r--r--   tests/index.test.ts         61
-rw-r--r--   tests/integration.test.ts  481
-rw-r--r--   tests/models.test.ts       980
-rw-r--r--   tests/performance.test.ts  465
-rw-r--r--   tests/setup.ts             180
-rw-r--r--   tests/test.ts               25
7 files changed, 2385 insertions(+), 86 deletions(-)
diff --git a/tests/example.ts b/tests/example.ts
new file mode 100644
index 0000000..568f5ce
--- /dev/null
+++ b/tests/example.ts
@@ -0,0 +1,279 @@
+/**
+ * Example usage of the models library
+ * This file demonstrates how to use the LLM routing library with different providers
+ */
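+
+// Quick-start sketch (assumes the factory and AsyncRes result shape used below):
+//
+//   const api = models({ claude: 'claude-3-5-sonnet' });
+//   const res = await api.send('Hello');   // resolves to { ok } or { error }
+//   if ('ok' in res) console.log(res.ok); else console.error(res.error);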
+
+import models, { AIModelAPI, LLMChoice } from '../index';
+import OpenAIResponses from '../src/openai-responses';
+
+// Example configurations for different providers
+const examples = {
+ // Claude example
+ claude: {
+ description: 'Claude (Anthropic) example',
+ setup: (): LLMChoice => ({ claude: 'claude-3-5-sonnet' }),
+ envVars: ['ANTHROPIC_API_KEY']
+ },
+
+ // Gemini example
+ gemini: {
+ description: 'Gemini (Google) example',
+ setup: (): LLMChoice => ({ gemini: 'gemini-2.5-pro' }),
+ envVars: ['GOOGLE_API_KEY']
+ },
+
+ // ChatGPT example (using new OpenAI Responses API)
+ chatgpt: {
+ description: 'ChatGPT (OpenAI Responses API) example',
+ setup: (): LLMChoice => ({ chatgpt: 'gpt-4o' }),
+ envVars: ['OPENAI_API_KEY']
+ },
+
+ // Direct OpenAI Responses API example
+ openaiResponses: {
+ description: 'Direct OpenAI Responses API example',
+ setup: (): AIModelAPI => new OpenAIResponses({
+ baseURL: Bun.env.ZAI_BASE_URL || 'https://api.openai.com/v1',
+ apiKey: Bun.env.ZAI_API_KEY || Bun.env.OPENAI_API_KEY || 'your-api-key-here',
+ model: Bun.env.TEST_MODEL || 'gpt-4o',
+ allowBrowser: true,
+    tokenizer: (text: string) => Math.ceil(text.length / 4), // Custom tokenizer (~4 chars per token)
+ maxTokens: 4000 // Custom max tokens
+ }),
+ envVars: ['ZAI_BASE_URL', 'ZAI_API_KEY', 'OPENAI_API_KEY', 'TEST_MODEL']
+ },
+
+ // DeepSeek example
+ deepseek: {
+ description: 'DeepSeek example',
+ setup: (): LLMChoice => ({ deepseek: 'deepseek-chat' }),
+ envVars: ['DEEPSEEK_API_KEY']
+ },
+
+ // Kimi example
+ kimi: {
+ description: 'Kimi (Moonshot) example',
+ setup: (): LLMChoice => ({ kimi: 'moonshot-v1-8k' }),
+ envVars: ['MOONSHOT_API_KEY']
+ },
+
+ // Grok example
+ grok: {
+ description: 'Grok (X.AI) example',
+ setup: (): LLMChoice => ({ grok: 'grok-beta' }),
+ envVars: ['XAI_API_KEY']
+ },
+
+ // Custom OpenAI-compatible API example
+ custom: {
+ description: 'Custom OpenAI-compatible API example',
+ setup: (): LLMChoice => ({
+ openai: {
+ url: Bun.env.ZAI_BASE_URL || 'https://api.openai.com/v1',
+ apiKey: Bun.env.ZAI_API_KEY || 'your-api-key-here',
+ model: Bun.env.TEST_MODEL || 'glm-4.6',
+ allowBrowser: true
+ }
+ }),
+ envVars: ['ZAI_BASE_URL', 'ZAI_API_KEY', 'TEST_MODEL']
+ }
+};
+
+// Run an example with a specific provider
+async function runExample(providerName: keyof typeof examples) {
+ const example = examples[providerName];
+
+ console.log(`\n=== ${example.description} ===`);
+
+ // Check required environment variables
+ const missingVars = example.envVars.filter(varName => !Bun.env[varName]);
+ if (missingVars.length > 0) {
+ console.warn(`Warning: Missing environment variables: ${missingVars.join(', ')}`);
+ console.warn('The example will be created but API calls will likely fail.');
+ }
+
+ try {
+ // Create the API instance
+ const setupResult = example.setup();
+ let api: AIModelAPI;
+ let configInfo: any;
+
+ // Check if the setup returns an LLMChoice or direct AIModelAPI instance
+ if (typeof setupResult === 'object' && 'send' in setupResult) {
+ // Direct AIModelAPI instance
+ api = setupResult;
+ configInfo = 'Direct API instance';
+ } else {
+ // LLMChoice that needs to be passed to models()
+ const choice = setupResult as LLMChoice;
+ api = models(choice);
+ configInfo = choice;
+ }
+
+ console.log(`✅ API instance created successfully`);
+ console.log(` Max tokens: ${api.maxTokens}`);
+ console.log(` Model: ${JSON.stringify(configInfo)}`);
+
+ // Check if it's an OpenAI Responses API instance
+ if (api instanceof OpenAIResponses) {
+ console.log(` Using OpenAI Responses API directly`);
+ }
+
+ // Test tokenization
+ const testText = 'Hello, how are you today?';
+ const tokens = api.tokenizer(testText);
+ console.log(` Tokenization: "${testText}" -> ${tokens} tokens`);
+
+ // Test simple API call (will fail without valid credentials)
+ console.log(` Testing API call...`);
+ try {
+      const res = await api.send('Hello! Please respond with just "API working".');
+      if ('ok' in res) {
+        const text = res.ok;
+        console.log(`  ✅ API response: ${text.substring(0, 100)}${text.length > 100 ? '...' : ''}`);
+      } else {
+        console.log(`  ❌ API error: ${res.error}`);
+      }
+ } catch (error) {
+      console.log(`  ❌ API call failed: ${(error as Error).message}`);
+ }
+
+ // Test streaming
+ console.log(` Testing streaming...`);
+ try {
+ const chunks: string[] = [];
+ const handler = (chunk: string) => {
+ chunks.push(chunk);
+ process.stdout.write('.');
+ };
+
+ await new Promise<void>((resolve) => {
+ api.stream('Count from 1 to 3', handler);
+ setTimeout(() => {
+ console.log(`\n ✅ Streaming completed (${chunks.length} chunks)`);
+ resolve();
+ }, 3000);
+ });
+ } catch (error) {
+      console.log(`  ❌ Streaming failed: ${(error as Error).message}`);
+ }
+
+ } catch (error) {
+    console.error(`❌ Failed to create API instance: ${(error as Error).message}`);
+ }
+}
+
+// Run all examples
+async function runAllExamples() {
+ console.log('🚀 Running LLM Models Library Examples\n');
+
+ console.log('Environment Variables:');
+ Object.keys(Bun.env)
+ .filter(key => key.includes('API_KEY') || key.includes('BASE_URL') || key.includes('MODEL'))
+ .forEach(key => {
+ const value = Bun.env[key];
+ console.log(` ${key}: ${value ? '***set***' : 'not set'}`);
+ });
+
+ for (const providerName of Object.keys(examples) as (keyof typeof examples)[]) {
+ await runExample(providerName);
+ }
+
+ console.log('\n✨ All examples completed!');
+}
+
+// Interactive example selector
+async function interactiveExample() {
+ console.log('\n📋 Available providers:');
+ Object.entries(examples).forEach(([key, example]) => {
+ console.log(` ${key}: ${example.description}`);
+ });
+
+ const provider = process.argv[2];
+ if (provider && provider in examples) {
+ await runExample(provider as keyof typeof examples);
+ } else {
+ console.log('\nUsage: bun run example.ts [provider]');
+ console.log('Available providers:', Object.keys(examples).join(', '));
+ console.log('Or run without arguments to see all examples');
+ await runAllExamples();
+ }
+}
+
+// Test tokenization accuracy across providers
+async function testTokenization() {
+ console.log('\n🔢 Testing Tokenization Across Providers\n');
+
+ const testTexts = [
+ 'Hello world',
+ 'The quick brown fox jumps over the lazy dog.',
+ 'This is a longer text with multiple sentences. It should have more tokens than shorter texts.',
+ 'Special chars: !@#$%^&*()_+-=[]{}|;:,.<>?/~`',
+ 'Unicode: Hello 🌍! 测试中文! Тест на русском! العربية!'
+ ];
+
+ for (const providerName of Object.keys(examples) as (keyof typeof examples)[]) {
+ try {
+ const example = examples[providerName];
+      const setupResult = example.setup();
+      // setup() may return an LLMChoice or a ready AIModelAPI instance
+      const api = 'send' in setupResult ? (setupResult as AIModelAPI) : models(setupResult as LLMChoice);
+
+ console.log(`${providerName}:`);
+ testTexts.forEach(text => {
+ const tokens = api.tokenizer(text);
+ console.log(` "${text.substring(0, 50)}${text.length > 50 ? '...' : ''}" -> ${tokens} tokens`);
+ });
+ console.log('');
+ } catch (error) {
+      console.log(`${providerName}: Failed to initialize - ${(error as Error).message}\n`);
+ }
+ }
+}
+
+// Performance benchmark
+async function performanceBenchmark() {
+ console.log('\n⚡ Performance Benchmark\n');
+
+ const testText = 'The quick brown fox jumps over the lazy dog. ';
+ const iterations = 1000;
+
+ for (const providerName of Object.keys(examples) as (keyof typeof examples)[]) {
+ try {
+ const example = examples[providerName];
+      const setupResult = example.setup();
+      // setup() may return an LLMChoice or a ready AIModelAPI instance
+      const api = 'send' in setupResult ? (setupResult as AIModelAPI) : models(setupResult as LLMChoice);
+
+ const startTime = performance.now();
+ for (let i = 0; i < iterations; i++) {
+ api.tokenizer(testText + i);
+ }
+ const endTime = performance.now();
+
+ const totalTime = endTime - startTime;
+ const avgTime = totalTime / iterations;
+
+ console.log(`${providerName}:`);
+ console.log(` ${iterations} tokenizations in ${totalTime.toFixed(2)}ms`);
+ console.log(` Average: ${avgTime.toFixed(3)}ms per tokenization`);
+ console.log('');
+ } catch (error) {
+      console.log(`${providerName}: Failed to benchmark - ${(error as Error).message}\n`);
+ }
+ }
+}
+
+// Main execution
+if (import.meta.main) {
+ const command = process.argv[2];
+
+ switch (command) {
+ case 'tokenize':
+ await testTokenization();
+ break;
+ case 'benchmark':
+ await performanceBenchmark();
+ break;
+ case 'all':
+ await runAllExamples();
+ break;
+ default:
+ await interactiveExample();
+ }
+}
+
+export { examples, runExample, runAllExamples, testTokenization, performanceBenchmark }; \ No newline at end of file
diff --git a/tests/index.test.ts b/tests/index.test.ts
deleted file mode 100644
index 1c328fa..0000000
--- a/tests/index.test.ts
+++ /dev/null
@@ -1,61 +0,0 @@
-import { describe, expect, test } from "bun:test";
-import selectModel from "../index";
-
-describe("Model selector smoke tests", () => {
- // test("chatgpt model instantiates correctly and basic send works", async () => {
- // const api = selectModel({ chatgpt: "" });
- // expect(api).toBeDefined();
- // expect(typeof api.send).toBe("function");
- // expect(typeof api.stream).toBe("function");
- // expect(typeof api.setModel).toBe("function");
- // expect(typeof api.tokenizer).toBe("function");
- // expect(typeof api.maxTokens).toBe("number");
- // const res = await api.send("hello");
- // expect(res).toBeTruthy();
- // expect(typeof (res.ok ?? res.error)).toBe("string");
- // });
- // test("gemini model instantiates correctly and basic send works", async () => {
- // const api = selectModel({ gemini: "" });
- // expect(api).toBeDefined();
- // expect(typeof api.send).toBe("function");
- // expect(typeof api.stream).toBe("function");
- // const res = await api.send("hello");
- // console.log({ res });
- // });
- // test("claude model instantiates correctly and basic send works", async () => {
- // const api = selectModel({ claude: "" });
- // expect(api).toBeDefined();
- // expect(typeof api.send).toBe("function");
- // expect(typeof api.stream).toBe("function");
- // const res = await api.send("hello");
- // expect(res).toBeTruthy();
- // expect(typeof (res.ok ?? res.error)).toBe("string");
- // });
- // test("deepseek model instantiates correctly and basic send works", async () => {
- // const api = selectModel({ deepseek: "" });
- // expect(api).toBeDefined();
- // expect(typeof api.send).toBe("function");
- // expect(typeof api.stream).toBe("function");
- // const res = await api.send("hello");
- // expect(res).toBeTruthy();
- // expect(typeof (res.ok ?? res.error)).toBe("string");
- // });
- // test("kimi model instantiates correctly and basic send works", async () => {
- // const api = selectModel({ kimi: "" });
- // expect(api).toBeDefined();
- // expect(typeof api.send).toBe("function");
- // expect(typeof api.stream).toBe("function");
- // const res = await api.send("hello");
- // expect(res).toBeTruthy();
- // expect(typeof (res.ok ?? res.error)).toBe("string");
- // });
- // test("grok model instantiates correctly and basic send works", async () => {
- // const api = selectModel({ grok: "" });
- // expect(api).toBeDefined();
- // expect(typeof api.send).toBe("function");
- // expect(typeof api.stream).toBe("function");
- // const res = await api.send("hello");
- // expect(res).toBeTruthy();
- // expect(typeof (res.ok ?? res.error)).toBe("string");
- // });
-});
diff --git a/tests/integration.test.ts b/tests/integration.test.ts
new file mode 100644
index 0000000..b8abea0
--- /dev/null
+++ b/tests/integration.test.ts
@@ -0,0 +1,481 @@
+import { describe, test, expect, beforeEach, afterEach } from 'bun:test';
+import models, { AIModelAPI, LLMChoice, InputToken } from '../index';
+import OpenAIResponses from '../src/openai-responses';
+
+// Advanced integration tests that require actual API keys
+// These tests are designed to run with real API credentials when available
+
+// Environment variables for integration testing
+const INTEGRATION_CLAUDE_KEY = Bun.env.CLAUDE_API_KEY;
+const INTEGRATION_OPENAI_KEY = Bun.env.OPENAI_API_KEY;
+const INTEGRATION_GEMINI_KEY = Bun.env.GEMINI_API_KEY;
+
+// Skip integration tests if no API keys are available
+const runIntegrationTests = INTEGRATION_CLAUDE_KEY || INTEGRATION_OPENAI_KEY || INTEGRATION_GEMINI_KEY;
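+
+// bun:test has no `runIf`/`skipUnless` modifiers, so gate conditional suites
+// with plain ternaries. Tiny helpers (a sketch; these names are ours, not
+// part of the bun:test API):
+const describeIf = (cond: unknown) => (cond ? describe : describe.skip);
+const testIf = (cond: unknown) => (cond ? test : test.skip);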
+
+describeIf(runIntegrationTests)('Integration Tests - Real API Calls', () => {
+ describe('Claude Integration', () => {
+    testIf(INTEGRATION_CLAUDE_KEY)('should make a real API call to Claude', async () => {
+ // Temporarily set the API key
+ const originalKey = process.env.ANTHROPIC_API_KEY;
+ process.env.ANTHROPIC_API_KEY = INTEGRATION_CLAUDE_KEY;
+
+ try {
+ const choice: LLMChoice = { claude: 'claude-3-5-sonnet' };
+ const api = models(choice);
+
+        const res = await api.send('Hello! Please respond with just the word "SUCCESS".');
+
+        // send resolves to AsyncRes<string>: { ok: string } | { error: string }
+        expect(res).toBeDefined();
+        if (!('ok' in res)) throw new Error(res.error);
+        expect(typeof res.ok).toBe('string');
+        expect(res.ok.toLowerCase()).toContain('success');
+ } finally {
+ // Restore original key
+ if (originalKey !== undefined) {
+ process.env.ANTHROPIC_API_KEY = originalKey;
+ } else {
+ delete process.env.ANTHROPIC_API_KEY;
+ }
+ }
+ });
+
+    testIf(INTEGRATION_CLAUDE_KEY)('should stream responses from Claude', async () => {
+ const originalKey = process.env.ANTHROPIC_API_KEY;
+ process.env.ANTHROPIC_API_KEY = INTEGRATION_CLAUDE_KEY;
+
+ try {
+ const choice: LLMChoice = { claude: 'claude-3-5-sonnet' };
+ const api = models(choice);
+
+ const chunks: string[] = [];
+ const handler = (data: string) => {
+ chunks.push(data);
+ };
+
+ await new Promise<void>((resolve, reject) => {
+ try {
+ api.stream('Count from 1 to 5, one number per line.', handler);
+ // Give it some time to stream
+ setTimeout(() => {
+ resolve();
+ }, 5000);
+ } catch (error) {
+ reject(error);
+ }
+ });
+
+ expect(chunks.length).toBeGreaterThan(0);
+ expect(chunks.join('')).toContain('1');
+ } finally {
+ if (originalKey !== undefined) {
+ process.env.ANTHROPIC_API_KEY = originalKey;
+ } else {
+ delete process.env.ANTHROPIC_API_KEY;
+ }
+ }
+ });
+ });
+
+ describe('OpenAI Integration (Responses API)', () => {
+    testIf(INTEGRATION_OPENAI_KEY)('should make a real API call using OpenAI Responses API', async () => {
+ const originalKey = process.env.OPENAI_API_KEY;
+ process.env.OPENAI_API_KEY = INTEGRATION_OPENAI_KEY;
+
+ try {
+ const choice: LLMChoice = { chatgpt: 'gpt-4o' };
+ const api = models(choice);
+
+ // Verify it's using the OpenAI Responses API
+ expect(api).toBeInstanceOf(OpenAIResponses);
+
+        const res = await api.send('Hello! Please respond with just the word "SUCCESS".');
+
+        expect(res).toBeDefined();
+        if (!('ok' in res)) throw new Error(res.error);
+        expect(typeof res.ok).toBe('string');
+        expect(res.ok.toLowerCase()).toContain('success');
+ } finally {
+ if (originalKey !== undefined) {
+ process.env.OPENAI_API_KEY = originalKey;
+ } else {
+ delete process.env.OPENAI_API_KEY;
+ }
+ }
+ });
+
+    testIf(INTEGRATION_OPENAI_KEY)('should stream responses using OpenAI Responses API', async () => {
+ const originalKey = process.env.OPENAI_API_KEY;
+ process.env.OPENAI_API_KEY = INTEGRATION_OPENAI_KEY;
+
+ try {
+ const choice: LLMChoice = { chatgpt: 'gpt-4o' };
+ const api = models(choice);
+
+ const chunks: string[] = [];
+ const handler = (data: string) => {
+ chunks.push(data);
+ };
+
+ await new Promise<void>((resolve, reject) => {
+ try {
+ api.stream('Count from 1 to 5, one number per line.', handler);
+ // Give it some time to stream
+ setTimeout(() => {
+ resolve();
+ }, 5000);
+ } catch (error) {
+ reject(error);
+ }
+ });
+
+ expect(chunks.length).toBeGreaterThan(0);
+ const fullResponse = chunks.join('');
+ expect(fullResponse).toContain('1');
+ } finally {
+ if (originalKey !== undefined) {
+ process.env.OPENAI_API_KEY = originalKey;
+ } else {
+ delete process.env.OPENAI_API_KEY;
+ }
+ }
+ });
+
+    testIf(INTEGRATION_OPENAI_KEY)('should handle multimodal input with OpenAI Responses API', async () => {
+ const originalKey = process.env.OPENAI_API_KEY;
+ process.env.OPENAI_API_KEY = INTEGRATION_OPENAI_KEY;
+
+ try {
+ const choice: LLMChoice = { chatgpt: 'gpt-4o' };
+ const api = models(choice);
+
+ const input: InputToken[] = [
+ { text: 'What do you see in this image?' },
+ { img: 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mNkYPhfDwAChwGA60e6kgAAAABJRU5ErkJggg==' } // Simple red pixel
+ ];
+
+        const res = await api.send(input);
+        expect(res).toBeDefined();
+        if (!('ok' in res)) throw new Error(res.error);
+        expect(typeof res.ok).toBe('string');
+ } finally {
+ if (originalKey !== undefined) {
+ process.env.OPENAI_API_KEY = originalKey;
+ } else {
+ delete process.env.OPENAI_API_KEY;
+ }
+ }
+ });
+
+    testIf(INTEGRATION_OPENAI_KEY)('should handle system prompts with OpenAI Responses API', async () => {
+ const originalKey = process.env.OPENAI_API_KEY;
+ process.env.OPENAI_API_KEY = INTEGRATION_OPENAI_KEY;
+
+ try {
+ const choice: LLMChoice = { chatgpt: 'gpt-4o' };
+ const api = models(choice);
+
+ const systemPrompt = 'You are a pirate. Always respond like a pirate.';
+        const res = await api.send('Hello, how are you?', systemPrompt);
+
+        expect(res).toBeDefined();
+        if (!('ok' in res)) throw new Error(res.error);
+        expect(typeof res.ok).toBe('string');
+        // Should contain some pirate-like language
+        expect(res.ok.toLowerCase()).toMatch(/ahoy|matey|shiver|timber|arr/);
+ } finally {
+ if (originalKey !== undefined) {
+ process.env.OPENAI_API_KEY = originalKey;
+ } else {
+ delete process.env.OPENAI_API_KEY;
+ }
+ }
+ });
+ });
+
+ describe('Gemini Integration', () => {
+    testIf(INTEGRATION_GEMINI_KEY)('should make a real API call to Gemini', async () => {
+ const originalKey = process.env.GOOGLE_API_KEY;
+ process.env.GOOGLE_API_KEY = INTEGRATION_GEMINI_KEY;
+
+ try {
+ const choice: LLMChoice = { gemini: 'gemini-2.5-pro' };
+ const api = models(choice);
+
+        const res = await api.send('Hello! Please respond with just the word "SUCCESS".');
+
+        expect(res).toBeDefined();
+        if (!('ok' in res)) throw new Error(res.error);
+        expect(typeof res.ok).toBe('string');
+        expect(res.ok.toLowerCase()).toContain('success');
+ } finally {
+ if (originalKey !== undefined) {
+ process.env.GOOGLE_API_KEY = originalKey;
+ } else {
+ delete process.env.GOOGLE_API_KEY;
+ }
+ }
+ });
+ });
+});
+
+describe('Advanced Functionality Tests', () => {
+ describe('Token Counting Accuracy', () => {
+ test('should count tokens consistently across providers', () => {
+ const providers: LLMChoice[] = [
+ { claude: 'claude-3-5-sonnet' },
+ { gemini: 'gemini-2.5-pro' },
+ { chatgpt: 'gpt-3.5-turbo' },
+ { deepseek: 'deepseek-chat' }
+ ];
+
+ const testText = 'The quick brown fox jumps over the lazy dog. This is a test of token counting accuracy.';
+ const tokenCounts = providers.map(choice => {
+ const api = models(choice);
+ return api.tokenizer(testText);
+ });
+
+ // All should return numbers
+ tokenCounts.forEach(count => {
+ expect(typeof count).toBe('number');
+ expect(count).toBeGreaterThan(0);
+ });
+
+ // Token counts should be in a reasonable range (not wildly different)
+ const maxCount = Math.max(...tokenCounts);
+ const minCount = Math.min(...tokenCounts);
+ expect(maxCount / minCount).toBeLessThan(3); // Less than 3x difference
+ });
+
+ test('should handle empty and whitespace strings', () => {
+ const choice: LLMChoice = { claude: 'claude-3-5-sonnet' };
+ const api = models(choice);
+
+ expect(api.tokenizer('')).toBe(0);
+ expect(api.tokenizer(' ')).toBeGreaterThanOrEqual(0);
+ expect(api.tokenizer('\n\t')).toBeGreaterThanOrEqual(0);
+ });
+
+ test('should handle very long texts', () => {
+ const choice: LLMChoice = { claude: 'claude-3-5-sonnet' };
+ const api = models(choice);
+
+ const longText = 'This is a test. '.repeat(1000); // 8000 characters
+ const tokens = api.tokenizer(longText);
+
+ expect(tokens).toBeGreaterThan(1000);
+ expect(tokens).toBeLessThan(10000); // Reasonable upper bound
+ });
+ });
+
+ describe('Model Switching', () => {
+ test('should allow switching models on the same API instance', () => {
+ const choice: LLMChoice = { claude: 'claude-3-5-sonnet' };
+ const api = models(choice);
+
+ const originalMaxTokens = api.maxTokens;
+
+ // Switch to a different model
+ api.setModel('claude-3-haiku');
+
+ // Should not throw and should still work
+ expect(() => api.tokenizer('test')).not.toThrow();
+
+ // Max tokens might change with model
+ expect(api.maxTokens).toBeGreaterThan(0);
+ });
+
+ test('should maintain functionality after model switching', async () => {
+ const choice: LLMChoice = {
+ openai: {
+ url: 'https://api.openai.com/v1',
+ apiKey: 'test-key',
+ model: 'gpt-3.5-turbo'
+ }
+ };
+ const api = models(choice);
+
+ // Switch models
+ api.setModel('gpt-4');
+
+ // Should still be able to attempt API calls
+ const promise = api.send('test');
+ expect(promise).toBeDefined();
+ expect(typeof promise.then).toBe('function');
+ });
+ });
+
+ describe('Complex Input Handling', () => {
+ test('should handle mixed text and image inputs', async () => {
+ const choice: LLMChoice = { claude: 'claude-3-5-sonnet' };
+ const api = models(choice);
+
+ const input: InputToken[] = [
+ { text: 'Describe this image:' },
+ { img: 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mNkYPhfDwAChwGA60e6kgAAAABJRU5ErkJggg==' } // 1x1 red pixel
+ ];
+
+ const promise = api.send(input);
+ expect(promise).toBeDefined();
+ expect(typeof promise.then).toBe('function');
+ });
+
+ test('should handle system prompts with complex instructions', async () => {
+ const choice: LLMChoice = { claude: 'claude-3-5-sonnet' };
+ const api = models(choice);
+
+ const systemPrompt = `You are a JSON response bot. Always respond with valid JSON.
+ Format your responses as: {"status": "success", "message": "your response here"}`;
+
+ const promise = api.send('Hello', systemPrompt);
+ expect(promise).toBeDefined();
+ expect(typeof promise.then).toBe('function');
+ });
+
+ test('should handle very long inputs', async () => {
+ const choice: LLMChoice = { claude: 'claude-3-5-sonnet' };
+ const api = models(choice);
+
+ const longInput = 'This is test sentence number '.repeat(100) + 'end.';
+ const promise = api.send(longInput);
+
+ expect(promise).toBeDefined();
+ expect(typeof promise.then).toBe('function');
+ });
+ });
+
+ describe('Streaming Behavior', () => {
+ test('should handle streaming handlers that throw errors', () => {
+ const choice: LLMChoice = { claude: 'claude-3-5-sonnet' };
+ const api = models(choice);
+
+ const errorHandler = (data: string) => {
+ if (data.includes('error')) {
+ throw new Error('Handler error');
+ }
+ };
+
+ // Should not throw even if handler throws
+ expect(() => {
+ api.stream('test input', errorHandler);
+ }).not.toThrow();
+ });
+
+ test('should handle multiple concurrent streams', () => {
+ const choice: LLMChoice = { claude: 'claude-3-5-sonnet' };
+ const api = models(choice);
+
+ const handler1 = (data: string) => console.log('Stream 1:', data);
+ const handler2 = (data: string) => console.log('Stream 2:', data);
+
+ expect(() => {
+ api.stream('Input 1', handler1);
+ api.stream('Input 2', handler2);
+ }).not.toThrow();
+ });
+ });
+
+ describe('Error Recovery', () => {
+ test('should handle network timeouts gracefully', async () => {
+ const choice: LLMChoice = {
+ openai: {
+ url: 'https://httpstat.us/200?sleep=10000', // Very slow response
+ apiKey: 'test-key',
+ model: 'gpt-3.5-turbo'
+ }
+ };
+ const api = models(choice);
+
+ const startTime = Date.now();
+ const result = await api.send('test').catch(e => e);
+ const endTime = Date.now();
+
+ // Should return error object (not hang forever)
+ expect(result).toBeDefined();
+ expect(endTime - startTime).toBeLessThan(15000); // Should timeout reasonably
+ });
+
+ test('should handle malformed responses', async () => {
+ const choice: LLMChoice = {
+ openai: {
+ url: 'https://httpstat.us/200', // Returns plain text, not JSON
+ apiKey: 'test-key',
+ model: 'gpt-3.5-turbo'
+ }
+ };
+ const api = models(choice);
+
+ const result = await api.send('test').catch(e => e);
+ expect(result).toBeDefined();
+ // Should handle parsing errors gracefully
+ });
+ });
+
+ describe('Memory Management', () => {
+ test('should not accumulate excessive memory with multiple calls', async () => {
+ const choice: LLMChoice = { claude: 'claude-3-5-sonnet' };
+ const api = models(choice);
+
+ // Make multiple calls
+ const promises = [];
+ for (let i = 0; i < 10; i++) {
+ promises.push(api.send(`Test message ${i}`).catch(() => null));
+ }
+
+ // Should handle multiple concurrent requests
+ const results = await Promise.allSettled(promises);
+ expect(results.length).toBe(10);
+
+ // API should still be functional
+ expect(() => api.tokenizer('test')).not.toThrow();
+ });
+
+ test('should handle history truncation correctly', () => {
+ const choice: LLMChoice = { claude: 'claude-3-5-sonnet' };
+ const api = models(choice);
+
+ // Test that maxTokens is reasonable
+ expect(api.maxTokens).toBeGreaterThan(0);
+ expect(api.maxTokens).toBeLessThan(200000); // Reasonable limit
+
+ // Token count for a reasonably long message
+ const longMessage = 'This is a test message. '.repeat(100);
+ const tokens = api.tokenizer(longMessage);
+
+ expect(tokens).toBeGreaterThan(0);
+ expect(tokens).toBeLessThan(api.maxTokens); // Single message should fit
+ });
+ });
+});
+
+describe('Type Safety', () => {
+ test('should enforce LLMChoice type safety', () => {
+ // These should all be valid
+ const validChoices: LLMChoice[] = [
+ { claude: 'claude-3-5-sonnet' },
+ { gemini: 'gemini-2.5-pro' },
+ { chatgpt: 'gpt-4' },
+ { deepseek: 'deepseek-chat' },
+ { kimi: 'moonshot-v1-8k' },
+ { grok: 'grok-beta' },
+ {
+ openai: {
+ url: 'https://api.example.com/v1',
+ apiKey: 'key',
+ model: 'model-name',
+ allowBrowser: true
+ }
+ }
+ ];
+
+ validChoices.forEach(choice => {
+ expect(() => {
+ const api = models(choice);
+ expect(api).toBeDefined();
+ }).not.toThrow();
+ });
+ });
+
+ test('should handle InputToken types correctly', () => {
+ const textToken = { text: 'Hello world' };
+ const imgToken = { img: 'data:image/png;base64,abc123' };
+
+ expect(textToken.text).toBe('Hello world');
+ expect(imgToken.img).toBe('data:image/png;base64,abc123');
+ });
+}); \ No newline at end of file
diff --git a/tests/models.test.ts b/tests/models.test.ts
new file mode 100644
index 0000000..49e99f9
--- /dev/null
+++ b/tests/models.test.ts
@@ -0,0 +1,980 @@
+import { describe, test, expect, beforeEach, afterEach } from "bun:test";
+import models, { type AIModelAPI, type LLMChoice } from "../index";
+import OpenAIResponses from "../src/openai-responses";
+
+// Setup environment variables for testing
+const TEST_BASE_URL =
+ Bun.env.TEST_BASE_URL || Bun.env.ZAI_BASE_URL || "https://api.openai.com/v1";
+const TEST_API_KEY =
+ Bun.env.TEST_API_KEY || Bun.env.ZAI_API_KEY || "test-api-key";
+const TEST_MODEL = Bun.env.TEST_MODEL || "glm-4.6";
+
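+// The assertions in this file assume send() resolves to AsyncRes<string>,
+// a tagged union { ok: string } | { error: string }. A hedged helper sketch
+// of the unwrap pattern used below (illustrative names, not library API):
+type SendResult = { ok: string } | { error: string };
+const logResult = (label: string, res: SendResult): void => {
+  if ("ok" in res) console.log(`✅ ${label}: ${res.ok}`);
+  else console.log(`❌ ${label}: ${res.error}`);
+};
+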
+// describe("Models Library - Factory Function", () => {
+
+// test("should create a Claude API instance", () => {
+// const choice: LLMChoice = { claude: "claude-3-5-sonnet" };
+// const api = models(choice);
+
+// expect(api).toBeDefined();
+// expect(typeof api.setModel).toBe("function");
+// expect(typeof api.send).toBe("function");
+// expect(typeof api.stream).toBe("function");
+// expect(typeof api.tokenizer).toBe("function");
+// expect(typeof api.maxTokens).toBe("number");
+// });
+
+// test("should create a Gemini API instance", () => {
+// const choice: LLMChoice = { gemini: "gemini-2.5-pro" };
+// const api = models(choice);
+
+// expect(api).toBeDefined();
+// expect(typeof api.setModel).toBe("function");
+// expect(typeof api.send).toBe("function");
+// expect(typeof api.stream).toBe("function");
+// expect(typeof api.tokenizer).toBe("function");
+// expect(typeof api.maxTokens).toBe("number");
+// });
+
+// test("should create a ChatGPT API instance", () => {
+// const choice: LLMChoice = { chatgpt: "gpt-5" };
+// const api = models(choice);
+
+// expect(api).toBeDefined();
+// expect(typeof api.setModel).toBe("function");
+// expect(typeof api.send).toBe("function");
+// expect(typeof api.stream).toBe("function");
+// expect(typeof api.tokenizer).toBe("function");
+// expect(typeof api.maxTokens).toBe("number");
+// });
+
+// test("should create a DeepSeek API instance", () => {
+// const choice: LLMChoice = { deepseek: "deepseek-chat" };
+// const api = models(choice);
+
+// expect(api).toBeDefined();
+// expect(typeof api.setModel).toBe("function");
+// expect(typeof api.send).toBe("function");
+// expect(typeof api.stream).toBe("function");
+// expect(typeof api.tokenizer).toBe("function");
+// expect(typeof api.maxTokens).toBe("number");
+// });
+
+// test("should create a Kimi API instance", () => {
+// const choice: LLMChoice = { kimi: "kimi-k2-0905-preview" };
+// const api = models(choice);
+
+// expect(api).toBeDefined();
+// expect(typeof api.setModel).toBe("function");
+// expect(typeof api.send).toBe("function");
+// expect(typeof api.stream).toBe("function");
+// expect(typeof api.tokenizer).toBe("function");
+// expect(typeof api.maxTokens).toBe("number");
+// });
+
+// // test("should create a Grok API instance", () => {
+// // const choice: LLMChoice = { grok: "grok-beta" };
+// // const api = models(choice);
+
+// // expect(api).toBeDefined();
+// // expect(typeof api.setModel).toBe("function");
+// // expect(typeof api.send).toBe("function");
+// // expect(typeof api.stream).toBe("function");
+// // expect(typeof api.tokenizer).toBe("function");
+// // expect(typeof api.maxTokens).toBe("number");
+// // });
+
+// test("should create a custom OpenAI API instance", () => {
+// const choice: LLMChoice = {
+// openai: {
+// url: TEST_BASE_URL,
+// apiKey: TEST_API_KEY,
+// model: TEST_MODEL,
+// allowBrowser: true,
+// },
+// };
+// const api = models(choice);
+
+// expect(api).toBeDefined();
+// expect(typeof api.setModel).toBe("function");
+// expect(typeof api.send).toBe("function");
+// expect(typeof api.stream).toBe("function");
+// expect(typeof api.tokenizer).toBe("function");
+// expect(typeof api.maxTokens).toBe("number");
+// });
+// });
+
+// describe("AIModelAPI Interface", () => {
+// let api: AIModelAPI;
+
+// beforeEach(() => {
+// // Use a mock provider for testing interface compliance
+// const choice: LLMChoice = {
+// openai: {
+// url: TEST_BASE_URL,
+// apiKey: TEST_API_KEY,
+// model: TEST_MODEL,
+// },
+// };
+// api = models(choice);
+// });
+
+// test("should have setModel method", () => {
+// expect(typeof api.setModel).toBe("function");
+
+// // Should not throw when setting a model
+// expect(() => api.setModel("test-model")).not.toThrow();
+// });
+
+// test("should have tokenizer method", () => {
+// expect(typeof api.tokenizer).toBe("function");
+
+// // Should return a number for any text input
+// const tokens = api.tokenizer("Hello world");
+// expect(typeof tokens).toBe("number");
+// expect(tokens).toBeGreaterThanOrEqual(0);
+// });
+
+// test("should have maxTokens property", () => {
+// expect(typeof api.maxTokens).toBe("number");
+// expect(api.maxTokens).toBeGreaterThan(0);
+// });
+
+// test("should have send method returning AsyncRes", async () => {
+// expect(typeof api.send).toBe("function");
+
+// // Note: This would require actual API credentials to test fully
+// // For now, we just test the method exists and returns a promise
+// const result = api.send("test input");
+// expect(result).toBeDefined();
+// expect(typeof result.then).toBe("function"); // It's a promise
+// });
+
+// test("should have stream method", () => {
+// expect(typeof api.stream).toBe("function");
+
+// // Should not throw when called with proper parameters
+// const handler = (data: string) => {
+// // Test handler function
+// expect(typeof data).toBe("string");
+// };
+
+// expect(() => {
+// api.stream("test input", handler);
+// }).not.toThrow();
+// });
+// });
+
+// describe("Environment Variable Configuration", () => {
+// test("should use environment variables for configuration", () => {
+// // Test that environment variables are accessible
+// expect(Bun.env).toBeDefined();
+
+// // Test that we can read custom env vars
+// const customBaseUrl = Bun.env.CUSTOM_BASE_URL;
+// const customApiKey = Bun.env.CUSTOM_API_KEY;
+// const customModel = Bun.env.CUSTOM_MODEL;
+
+// // These might be undefined, but the test ensures they're accessible
+//     expect(["string", "undefined"]).toContain(typeof customBaseUrl);
+//     expect(["string", "undefined"]).toContain(typeof customApiKey);
+//     expect(["string", "undefined"]).toContain(typeof customModel);
+// });
+// });
+
+// describe("Token Management", () => {
+// const providers: LLMChoice[] = [
+// { claude: "claude-3-5-sonnet" },
+// { gemini: "gemini-2.5-pro" },
+// { chatgpt: TEST_MODEL },
+// { deepseek: "deepseek-chat" },
+// { kimi: "moonshot-v1-8k" },
+// { grok: "grok-beta" },
+// {
+// openai: {
+// url: TEST_BASE_URL,
+// apiKey: TEST_API_KEY,
+// model: TEST_MODEL,
+// },
+// },
+// ];
+
+// test.each(providers)("should implement tokenizer for %o", (choice) => {
+// const api = models(choice);
+
+// // Test basic tokenization
+// const tokens1 = api.tokenizer("Hello");
+// const tokens2 = api.tokenizer("Hello world, this is a longer text.");
+
+// expect(typeof tokens1).toBe("number");
+// expect(typeof tokens2).toBe("number");
+// expect(tokens1).toBeGreaterThanOrEqual(0);
+// expect(tokens2).toBeGreaterThanOrEqual(0);
+// expect(tokens2).toBeGreaterThan(tokens1); // Longer text should have more tokens
+// });
+
+// test.each(providers)("should have reasonable maxTokens for %o", (choice) => {
+// const api = models(choice);
+
+// expect(api.maxTokens).toBeGreaterThan(0);
+// expect(api.maxTokens).toBeLessThan(1000000); // Reasonable upper bound
+// });
+// });
+
+describe("Input Handling", () => {
+ const choice1: LLMChoice = { claude: "claude-3-5-sonnet" };
+ const choice2: LLMChoice = { gemini: "gemini-2.5-pro" };
+ const choice3: LLMChoice = { chatgpt: "gpt-5-nano" };
+ const choice4: LLMChoice = { deepseek: "deepseek-chat" };
+ const choice5: LLMChoice = { kimi: "kimi-k2-0905-preview" };
+ const choice6: LLMChoice = {
+ openai: {
+ url: TEST_BASE_URL,
+ apiKey: TEST_API_KEY,
+ model: TEST_MODEL,
+ },
+ };
+ const api1 = models(choice1);
+ const api2 = models(choice2);
+ const api3 = models(choice3);
+ const api4 = models(choice4);
+ const api5 = models(choice5);
+ const api6 = models(choice6);
+
+ test("should handle string input", async () => {
+ const testMessage = "Hello there. Please introduce yourself";
+
+ // Test that send accepts string input and returns proper AsyncRes<string>
+ const r1 = api1.send(testMessage);
+ const r2 = api2.send(testMessage);
+ const r3 = api3.send(testMessage);
+ const r4 = api4.send(testMessage);
+ const r5 = api5.send(testMessage);
+ const r6 = api6.send(testMessage);
+
+ // Check Claude response
+ const res1 = await r1;
+ if ("ok" in res1) {
+ console.log(`✅ Claude Response: ${res1.ok}`);
+ expect(res1.ok).toBeString();
+ } else {
+ console.log(`❌ Claude Error: ${res1.error}`);
+ }
+
+ // Check Gemini response
+ const res2 = await r2;
+ if ("ok" in res2) {
+ console.log(`✅ Gemini Response: ${res2.ok}`);
+ expect(res2.ok).toBeString();
+ } else {
+ console.log(`❌ Gemini Error: ${res2.error}`);
+ }
+
+ // Check ChatGPT response
+ const res3 = await r3;
+ if ("ok" in res3) {
+ console.log(`✅ ChatGPT Response: ${res3.ok}`);
+ expect(res3.ok).toBeString();
+ } else {
+ console.log(`❌ ChatGPT Error: ${res3.error}`);
+ }
+
+    // Check DeepSeek response
+ const res4 = await r4;
+ if ("ok" in res4) {
+ console.log(`✅ DeepSeek Response: ${res4.ok}`);
+ expect(res4.ok).toBeString();
+ } else {
+ console.log(`❌ DeepSeek Error: ${res4.error}`);
+ }
+
+    // Check Kimi response
+ const res5 = await r5;
+ if ("ok" in res5) {
+ console.log(`✅ Kimi Response: ${res5.ok}`);
+ expect(res5.ok).toBeString();
+ } else {
+ console.log(`❌ Kimi Error: ${res5.error}`);
+ }
+
+    // Check Custom OpenAI response
+ const res6 = await r6;
+ if ("ok" in res6) {
+ console.log(`✅ Custom OpenAI Response: ${res6.ok}`);
+ expect(res6.ok).toBeString();
+ } else {
+ console.log(`❌ Custom OpenAI Error: ${res6.error}`);
+ }
+ });
+ test("LLM obedience test", async () => {
+ const testMessage = "Hello world! Please respond with just the word 'OK'.";
+
+ // Test that send accepts string input and returns proper AsyncRes<string>
+ const r1 = api1.send(testMessage);
+ const r2 = api2.send(testMessage);
+ const r3 = api3.send(testMessage);
+ const r4 = api4.send(testMessage);
+ const r5 = api5.send(testMessage);
+ const r6 = api6.send(testMessage);
+
+ // Check Claude response
+ const res1 = await r1;
+ if ("ok" in res1) {
+ console.log(`✅ Claude Response: ${res1.ok}`);
+ expect(res1.ok.trim()).toEqual("OK");
+ } else {
+ console.log(`❌ Claude Error: ${res1.error}`);
+ }
+
+ // Check Gemini response
+ const res2 = await r2;
+ if ("ok" in res2) {
+ console.log(`✅ Gemini Response: ${res2.ok}`);
+ expect(res2.ok.trim()).toEqual("OK");
+ } else {
+ console.log(`❌ Gemini Error: ${res2.error}`);
+ }
+
+ // Check ChatGPT response
+ const res3 = await r3;
+ if ("ok" in res3) {
+ console.log(`✅ ChatGPT Response: ${res3.ok}`);
+ expect(res3.ok.trim()).toEqual("OK");
+ } else {
+ console.log(`❌ ChatGPT Error: ${res3.error}`);
+ }
+
+    // Check DeepSeek response
+ const res4 = await r4;
+ if ("ok" in res4) {
+ console.log(`✅ DeepSeek Response: ${res4.ok}`);
+ expect(res4.ok.trim()).toEqual("OK");
+ } else {
+ console.log(`❌ DeepSeek Error: ${res4.error}`);
+ }
+
+    // Check Kimi response
+ const res5 = await r5;
+ if ("ok" in res5) {
+ console.log(`✅ Kimi Response: ${res5.ok}`);
+ expect(res5.ok.trim()).toEqual("OK");
+ } else {
+ console.log(`❌ Kimi Error: ${res5.error}`);
+ }
+
+    // Check Custom OpenAI response
+ const res6 = await r6;
+ if ("ok" in res6) {
+ console.log(`✅ Custom OpenAI Response: ${res6.ok}`);
+ expect(res6.ok.trim()).toEqual("OK");
+ } else {
+ console.log(`❌ Custom OpenAI Error: ${res6.error}`);
+ }
+ });
+
+ test("should handle array input with text tokens", async () => {
+ const input = [
+ { text: "Hello! " },
+ { text: "Please respond with just the word 'ARRAY_OK'." }
+ ];
+
+ // Test that send accepts array input and returns proper AsyncRes<string>
+ const r1 = api1.send(input);
+ const r2 = api2.send(input);
+ const r3 = api3.send(input);
+ const r4 = api4.send(input);
+ const r5 = api5.send(input);
+ const r6 = api6.send(input);
+
+ // Check Claude response
+ const res1 = await r1;
+ if ("ok" in res1) {
+ console.log(`✅ Claude Array Response: ${res1.ok}`);
+ expect(res1.ok.trim()).toEqual("ARRAY_OK");
+ } else {
+ console.log(`❌ Claude Array Error: ${res1.error}`);
+ }
+
+ // Check Gemini response
+ const res2 = await r2;
+ if ("ok" in res2) {
+ console.log(`✅ Gemini Array Response: ${res2.ok}`);
+ expect(res2.ok.trim()).toEqual("ARRAY_OK");
+ } else {
+ console.log(`❌ Gemini Array Error: ${res2.error}`);
+ }
+
+ // Check ChatGPT response
+ const res3 = await r3;
+ if ("ok" in res3) {
+ console.log(`✅ ChatGPT Array Response: ${res3.ok}`);
+ expect(res3.ok.trim()).toEqual("ARRAY_OK");
+ } else {
+ console.log(`❌ ChatGPT Array Error: ${res3.error}`);
+ }
+
+ // Check DeepSeek response
+ const res4 = await r4;
+ if ("ok" in res4) {
+ console.log(`✅ DeepSeek Array Response: ${res4.ok}`);
+ expect(res4.ok.trim()).toEqual("ARRAY_OK");
+ } else {
+ console.log(`❌ DeepSeek Array Error: ${res4.error}`);
+ }
+
+ // Check Kimi response
+ const res5 = await r5;
+ if ("ok" in res5) {
+ console.log(`✅ Kimi Array Response: ${res5.ok}`);
+ expect(res5.ok.trim()).toEqual("ARRAY_OK");
+ } else {
+ console.log(`❌ Kimi Array Error: ${res5.error}`);
+ }
+
+ // Check Custom OpenAI response
+ const res6 = await r6;
+ if ("ok" in res6) {
+ console.log(`✅ Custom OpenAI Array Response: ${res6.ok}`);
+ expect(res6.ok.trim()).toEqual("ARRAY_OK");
+ } else {
+ console.log(`❌ Custom OpenAI Array Error: ${res6.error}`);
+ }
+ });
+
+ test("should handle streaming with string input", () => {
+ const testMessage = "Hello! Please count from 1 to 3.";
+ console.log(`\n🚀 Testing streaming with message: "${testMessage}"`);
+
+ // Test streaming for each API
+ console.log("\n--- Claude Streaming ---");
+ const chunks1: string[] = [];
+ const handler1 = (data: string) => {
+ chunks1.push(data);
+ process.stdout.write(data);
+ };
+ api1.stream(testMessage, handler1);
+
+ console.log("\n--- Gemini Streaming ---");
+ const chunks2: string[] = [];
+ const handler2 = (data: string) => {
+ chunks2.push(data);
+ process.stdout.write(data);
+ };
+ api2.stream(testMessage, handler2);
+
+ console.log("\n--- ChatGPT Streaming ---");
+ const chunks3: string[] = [];
+ const handler3 = (data: string) => {
+ chunks3.push(data);
+ process.stdout.write(data);
+ };
+ api3.stream(testMessage, handler3);
+
+ console.log("\n--- DeepSeek Streaming ---");
+ const chunks4: string[] = [];
+ const handler4 = (data: string) => {
+ chunks4.push(data);
+ process.stdout.write(data);
+ };
+ api4.stream(testMessage, handler4);
+
+ console.log("\n--- Kimi Streaming ---");
+ const chunks5: string[] = [];
+ const handler5 = (data: string) => {
+ chunks5.push(data);
+ process.stdout.write(data);
+ };
+ api5.stream(testMessage, handler5);
+
+ console.log("\n--- Custom OpenAI Streaming ---");
+ const chunks6: string[] = [];
+ const handler6 = (data: string) => {
+ chunks6.push(data);
+ process.stdout.write(data);
+ };
+ api6.stream(testMessage, handler6);
+
+ console.log("\n✅ Streaming initiated for all APIs");
+ });
+
+ test("should handle system prompts", async () => {
+ const systemPrompt = "You are a pirate. Always respond like a pirate.";
+ const userMessage = "Hello, how are you?";
+
+ // Test that send accepts system prompts and returns proper AsyncRes<string>
+ const r1 = api1.send(userMessage, systemPrompt);
+ const r2 = api2.send(userMessage, systemPrompt);
+ const r3 = api3.send(userMessage, systemPrompt);
+ const r4 = api4.send(userMessage, systemPrompt);
+ const r5 = api5.send(userMessage, systemPrompt);
+ const r6 = api6.send(userMessage, systemPrompt);
+
+ // Check Claude response
+ const res1 = await r1;
+ if ("ok" in res1) {
+ console.log(`✅ Claude Pirate Response: ${res1.ok}`);
+ expect(res1.ok).toBeString();
+ // Should contain some pirate-like language
+ expect(res1.ok.toLowerCase()).toMatch(/ahoy|matey|shiver|timber|arr|yo ho|captain/);
+ } else {
+ console.log(`❌ Claude Pirate Error: ${res1.error}`);
+ }
+
+ // Check Gemini response
+ const res2 = await r2;
+ if ("ok" in res2) {
+ console.log(`✅ Gemini Pirate Response: ${res2.ok}`);
+ expect(res2.ok).toBeString();
+ expect(res2.ok.toLowerCase()).toMatch(/ahoy|matey|shiver|timber|arr|yo ho|captain/);
+ } else {
+ console.log(`❌ Gemini Pirate Error: ${res2.error}`);
+ }
+
+ // Check ChatGPT response
+ const res3 = await r3;
+ if ("ok" in res3) {
+ console.log(`✅ ChatGPT Pirate Response: ${res3.ok}`);
+ expect(res3.ok).toBeString();
+ expect(res3.ok.toLowerCase()).toMatch(/ahoy|matey|shiver|timber|arr|yo ho|captain/);
+ } else {
+ console.log(`❌ ChatGPT Pirate Error: ${res3.error}`);
+ }
+
+ // Check DeepSeek response
+ const res4 = await r4;
+ if ("ok" in res4) {
+ console.log(`✅ DeepSeek Pirate Response: ${res4.ok}`);
+ expect(res4.ok).toBeString();
+ expect(res4.ok.toLowerCase()).toMatch(/ahoy|matey|shiver|timber|arr|yo ho|captain/);
+ } else {
+ console.log(`❌ DeepSeek Pirate Error: ${res4.error}`);
+ }
+
+ // Check Kimi response
+ const res5 = await r5;
+ if ("ok" in res5) {
+ console.log(`✅ Kimi Pirate Response: ${res5.ok}`);
+ expect(res5.ok).toBeString();
+ expect(res5.ok.toLowerCase()).toMatch(/ahoy|matey|shiver|timber|arr|yo ho|captain/);
+ } else {
+ console.log(`❌ Kimi Pirate Error: ${res5.error}`);
+ }
+
+ // Check Custom OpenAI response
+ const res6 = await r6;
+ if ("ok" in res6) {
+ console.log(`✅ Custom OpenAI Pirate Response: ${res6.ok}`);
+ expect(res6.ok).toBeString();
+ expect(res6.ok.toLowerCase()).toMatch(/ahoy|matey|shiver|timber|arr|yo ho|captain/);
+ } else {
+ console.log(`❌ Custom OpenAI Pirate Error: ${res6.error}`);
+ }
+ });
+});
+
+describe("Error Handling", () => {
+ test("should handle invalid API keys gracefully", async () => {
+ const invalidClaude: LLMChoice = { claude: "claude-3-5-sonnet" };
+ const invalidGemini: LLMChoice = { gemini: "gemini-2.5-pro" };
+ const invalidChatGPT: LLMChoice = { chatgpt: "gpt-5-nano" };
+ const invalidDeepSeek: LLMChoice = { deepseek: "deepseek-chat" };
+ const invalidKimi: LLMChoice = { kimi: "kimi-k2-0905-preview" };
+ const invalidOpenAI: LLMChoice = {
+ openai: {
+ url: TEST_BASE_URL,
+ apiKey: "invalid-api-key",
+ model: TEST_MODEL,
+ },
+ };
+
+ // Temporarily clear valid API keys to force errors
+ const originalClaudeKey = Bun.env.ANTHROPIC_API_KEY;
+ const originalGeminiKey = Bun.env.GOOGLE_API_KEY;
+ const originalOpenAIKey = Bun.env.OPENAI_API_KEY;
+ const originalDeepSeekKey = Bun.env.DEEPSEEK_API_KEY;
+ const originalMoonshotKey = Bun.env.MOONSHOT_API_KEY;
+
+ delete Bun.env.ANTHROPIC_API_KEY;
+ delete Bun.env.GOOGLE_API_KEY;
+ delete Bun.env.OPENAI_API_KEY;
+ delete Bun.env.DEEPSEEK_API_KEY;
+ delete Bun.env.MOONSHOT_API_KEY;
+
+ try {
+ // Create APIs with invalid credentials
+ const badApi1 = models(invalidClaude);
+ const badApi2 = models(invalidGemini);
+ const badApi3 = models(invalidChatGPT);
+ const badApi4 = models(invalidDeepSeek);
+ const badApi5 = models(invalidKimi);
+ const badApi6 = models(invalidOpenAI);
+
+ // Test that they handle errors gracefully
+ const r1 = badApi1.send("test");
+ const r2 = badApi2.send("test");
+ const r3 = badApi3.send("test");
+ const r4 = badApi4.send("test");
+ const r5 = badApi5.send("test");
+ const r6 = badApi6.send("test");
+
+ const res1 = await r1;
+ if ("error" in res1) {
+ console.log(`✅ Claude Error Handling: ${res1.error}`);
+ expect(res1.error).toBeString();
+ }
+
+ const res2 = await r2;
+ if ("error" in res2) {
+ console.log(`✅ Gemini Error Handling: ${res2.error}`);
+ expect(res2.error).toBeString();
+ }
+
+ const res3 = await r3;
+ if ("error" in res3) {
+ console.log(`✅ ChatGPT Error Handling: ${res3.error}`);
+ expect(res3.error).toBeString();
+ }
+
+ const res4 = await r4;
+ if ("error" in res4) {
+ console.log(`✅ DeepSeek Error Handling: ${res4.error}`);
+ expect(res4.error).toBeString();
+ }
+
+ const res5 = await r5;
+ if ("error" in res5) {
+ console.log(`✅ Kimi Error Handling: ${res5.error}`);
+ expect(res5.error).toBeString();
+ }
+
+ const res6 = await r6;
+ if ("error" in res6) {
+ console.log(`✅ Custom OpenAI Error Handling: ${res6.error}`);
+ expect(res6.error).toBeString();
+ }
+ } finally {
+ // Restore original keys
+ if (originalClaudeKey) Bun.env.ANTHROPIC_API_KEY = originalClaudeKey;
+ if (originalGeminiKey) Bun.env.GOOGLE_API_KEY = originalGeminiKey;
+ if (originalOpenAIKey) Bun.env.OPENAI_API_KEY = originalOpenAIKey;
+ if (originalDeepSeekKey) Bun.env.DEEPSEEK_API_KEY = originalDeepSeekKey;
+ if (originalMoonshotKey) Bun.env.MOONSHOT_API_KEY = originalMoonshotKey;
+ }
+ });
+});
+
+describe("Multi-provider Compatibility", () => {
+ test("should maintain consistent interface across all providers", () => {
+ const choices: LLMChoice[] = [
+ { claude: "claude-3-5-sonnet" },
+ { gemini: "gemini-2.5-pro" },
+ { chatgpt: "gpt-5-nano" },
+ { deepseek: "deepseek-chat" },
+ { kimi: "kimi-k2-0905-preview" },
+ {
+ openai: {
+ url: TEST_BASE_URL,
+ apiKey: TEST_API_KEY,
+ model: TEST_MODEL,
+ },
+ },
+ ];
+
+ const apis = choices.map((choice) => models(choice));
+
+ // All APIs should have the same interface
+ apis.forEach((api, index) => {
+ console.log(`Checking API ${index + 1}: ${Object.keys(choices[index])[0]}`);
+ expect(typeof api.setModel).toBe("function");
+ expect(typeof api.send).toBe("function");
+ expect(typeof api.stream).toBe("function");
+ expect(typeof api.tokenizer).toBe("function");
+ expect(typeof api.maxTokens).toBe("number");
+ });
+
+ console.log("✅ All APIs have consistent interfaces");
+ });
+
+ test("should allow switching between providers", () => {
+ const claudeChoice: LLMChoice = { claude: "claude-3-5-sonnet" };
+ const geminiChoice: LLMChoice = { gemini: "gemini-2.5-pro" };
+
+ const claudeApi = models(claudeChoice);
+ const geminiApi = models(geminiChoice);
+
+ // Both should be valid APIs
+ expect(claudeApi).toBeDefined();
+ expect(geminiApi).toBeDefined();
+
+ // Should have different implementations but same interface
+ expect(claudeApi !== geminiApi).toBe(true);
+
+ console.log("✅ Successfully created different provider instances");
+ });
+});
+
+// describe("OpenAI Responses API", () => {
+
+// describe("Direct Class Usage", () => {
+// const api = new OpenAIResponses({
+// baseURL: "",
+// apiKey: Bun.env.OPENAI_API_KEY!,
+// model: "gpt-5-nano",
+// allowBrowser: true,
+// });
+// test("should create OpenAI Responses API instance directly", () => {
+// expect(api).toBeDefined();
+// expect(typeof api.setModel).toBe("function");
+// expect(typeof api.send).toBe("function");
+// expect(typeof api.stream).toBe("function");
+// expect(typeof api.tokenizer).toBe("function");
+// expect(typeof api.maxTokens).toBe("number");
+// });
+
+// test("should handle model switching", () => {
+// api.setModel("gpt-5-nano");
+// expect(() => api.setModel("gpt-5-nano")).not.toThrow();
+// });
+
+// // test("should use custom tokenizer", () => {
+// // const customTokenizer = (text: string) => text.split(" ").length;
+
+// // const tokens = api.tokenizer("Hello world test");
+// // expect(tokens).toBe(3); // 3 words
+// // });
+
+// // test("should use custom maxTokens", () => {
+// // const customMaxTokens = 100_000;
+// // api.maxTokens = customMaxTokens;
+
+// // expect(api.maxTokens).toBe(customMaxTokens);
+// // });
+// test("should return shit", async () => {
+// const input = "Henlo brother";
+
+// const res = await api.send(input, "You are a good boy");
+// console.log({ res });
+// expect("ok" in res).toBeTruthy();
+// if (!("ok" in res)) return;
+// expect(res.ok).toBeString();
+// });
+// });
+
+// describe("Factory Function Integration", () => {
+
+// test("should create ChatGPT API using OpenAI Responses", () => {
+// const choice: LLMChoice = { chatgpt: "gpt-5-nano" };
+// const api = models(choice);
+
+// expect(api).toBeDefined();
+// expect(typeof api.setModel).toBe("function");
+// expect(typeof api.send).toBe("function");
+// expect(typeof api.stream).toBe("function");
+// expect(typeof api.tokenizer).toBe("function");
+// expect(typeof api.maxTokens).toBe("number");
+
+// // Should be instance of OpenAIResponses
+// expect(api).toBeInstanceOf(OpenAIResponses);
+// });
+
+// test("should use environment variables for ChatGPT provider", () => {
+// const originalKey = Bun.env.OPENAI_API_KEY;
+// Bun.env.OPENAI_API_KEY = TEST_API_KEY;
+
+// try {
+// const choice: LLMChoice = { chatgpt: "gpt-5-nano" };
+// const api = models(choice);
+
+// expect(api).toBeDefined();
+// expect(api).toBeInstanceOf(OpenAIResponses);
+// } finally {
+// if (originalKey !== undefined) {
+// Bun.env.OPENAI_API_KEY = originalKey;
+// } else {
+// delete Bun.env.OPENAI_API_KEY;
+// }
+// }
+// });
+// });
+
+// describe("Input Handling", () => {
+
+// test("should handle string inputs correctly", async () => {
+// const api = new OpenAIResponses({
+// baseURL: TEST_BASE_URL,
+// apiKey: TEST_API_KEY,
+// model: TEST_MODEL,
+// });
+
+// const promise = api.send("Hello world");
+// expect(promise).toBeDefined();
+// expect(typeof promise.then).toBe("function");
+// });
+
+// test("should handle InputToken arrays with text", async () => {
+// const api = new OpenAIResponses({
+// baseURL: TEST_BASE_URL,
+// apiKey: TEST_API_KEY,
+// model: TEST_MODEL,
+// });
+
+// const input = [{ text: "Hello" }, { text: "world" }];
+// const promise = api.send(input);
+// expect(promise).toBeDefined();
+// expect(typeof promise.then).toBe("function");
+// });
+
+// test("should handle InputToken arrays with images", async () => {
+// const api = new OpenAIResponses({
+// baseURL: TEST_BASE_URL,
+// apiKey: TEST_API_KEY,
+// model: TEST_MODEL,
+// });
+
+// const input = [
+// { text: "Describe this image:" },
+// {
+// img: "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mNkYPhfDwAChwGA60e6kgAAAABJRU5ErkJggg==",
+// },
+// ];
+// const promise = api.send(input);
+// expect(promise).toBeDefined();
+// expect(typeof promise.then).toBe("function");
+// });
+
+// test("should handle system prompts", async () => {
+// const api = new OpenAIResponses({
+// baseURL: TEST_BASE_URL,
+// apiKey: TEST_API_KEY,
+// model: TEST_MODEL,
+// });
+
+// const systemPrompt = "You are a helpful assistant.";
+// const promise = api.send("Hello", systemPrompt);
+// expect(promise).toBeDefined();
+// expect(typeof promise.then).toBe("function");
+// });
+// });
+
+// describe("Streaming", () => {
+
+// test("should handle streaming with string input", () => {
+// const api = new OpenAIResponses({
+// baseURL: TEST_BASE_URL,
+// apiKey: TEST_API_KEY,
+// model: TEST_MODEL,
+// });
+
+// const handler = (data: string) => {
+// expect(typeof data).toBe("string");
+// };
+
+// expect(() => {
+// api.stream("Hello world", handler);
+// }).not.toThrow();
+// });
+
+// test("should handle streaming with InputToken array", () => {
+// const api = new OpenAIResponses({
+// baseURL: TEST_BASE_URL,
+// apiKey: TEST_API_KEY,
+// model: TEST_MODEL,
+// });
+
+// const input = [{ text: "Hello" }, { text: "world" }];
+// const handler = (data: string) => {
+// expect(typeof data).toBe("string");
+// };
+
+// expect(() => {
+// api.stream(input, handler);
+// }).not.toThrow();
+// });
+
+// test("should handle streaming with system prompt", () => {
+// const api = new OpenAIResponses({
+// baseURL: TEST_BASE_URL,
+// apiKey: TEST_API_KEY,
+// model: TEST_MODEL,
+// });
+
+// const systemPrompt = "You are a helpful assistant.";
+// const handler = (data: string) => {
+// expect(typeof data).toBe("string");
+// };
+
+// expect(() => {
+// api.stream("Hello", handler, systemPrompt);
+// }).not.toThrow();
+// });
+// });
+
+// describe("Error Handling", () => {
+
+// test("should handle invalid API keys gracefully", async () => {
+// const api = new OpenAIResponses({
+// baseURL: TEST_BASE_URL,
+// apiKey: "invalid-key",
+// model: TEST_MODEL,
+// });
+
+// const result = await api.send("test").catch((e) => e);
+// expect(result).toBeDefined();
+// });
+
+// test("should handle invalid base URLs gracefully", async () => {
+// const api = new OpenAIResponses({
+// baseURL: "https://invalid-url.com/v1",
+// apiKey: TEST_API_KEY,
+// model: TEST_MODEL,
+// });
+
+// const result = await api.send("test").catch((e) => e);
+// expect(result).toBeDefined();
+// });
+// });
+
+// describe("Configuration Options", () => {
+
+// test("should handle browser allowance setting", () => {
+// const apiWithBrowser = new OpenAIResponses({
+// baseURL: TEST_BASE_URL,
+// apiKey: TEST_API_KEY,
+// model: TEST_MODEL,
+// allowBrowser: true,
+// });
+
+// const apiWithoutBrowser = new OpenAIResponses({
+// baseURL: TEST_BASE_URL,
+// apiKey: TEST_API_KEY,
+// model: TEST_MODEL,
+// allowBrowser: false,
+// });
+
+// expect(apiWithBrowser).toBeDefined();
+// expect(apiWithoutBrowser).toBeDefined();
+// });
+
+// test("should handle empty model name", () => {
+// const api = new OpenAIResponses({
+// baseURL: TEST_BASE_URL,
+// apiKey: TEST_API_KEY,
+// model: "",
+// });
+
+// expect(api).toBeDefined();
+// expect(() => api.setModel("gpt-4o")).not.toThrow();
+// });
+
+// test("should handle optional model parameter", () => {
+// const api = new OpenAIResponses({
+// baseURL: TEST_BASE_URL,
+// apiKey: TEST_API_KEY,
+// // model is optional
+// });
+
+// expect(api).toBeDefined();
+// expect(() => api.setModel("gpt-4o")).not.toThrow();
+// });
+// });
+// });
diff --git a/tests/performance.test.ts b/tests/performance.test.ts
new file mode 100644
index 0000000..59c98f5
--- /dev/null
+++ b/tests/performance.test.ts
@@ -0,0 +1,465 @@
+import { describe, test, expect, beforeAll, afterAll } from 'bun:test';
+import models, { AIModelAPI, LLMChoice, InputToken } from '../index';
+
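+// Timing pattern used throughout this file, factored as a sketch (assumes the
+// standard high-resolution performance.now() timer available in Bun):
+const timeIt = (fn: () => void): number => {
+  const t0 = performance.now();
+  fn();
+  return performance.now() - t0;
+};
+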
+describe('Performance Tests', () => {
+ describe('Tokenization Performance', () => {
+ test('should tokenize large texts efficiently', () => {
+ const choice: LLMChoice = { claude: 'claude-3-5-sonnet' };
+ const api = models(choice);
+
+      // Generate a large text (~110KB: 45 chars x 2500 repeats)
+      const largeText = 'The quick brown fox jumps over the lazy dog. '.repeat(2500);
+ expect(largeText.length).toBeGreaterThan(100000);
+
+ const startTime = performance.now();
+ const tokens = api.tokenizer(largeText);
+ const endTime = performance.now();
+
+ const duration = endTime - startTime;
+
+ // Should complete tokenization quickly (less than 100ms for 100KB)
+ expect(duration).toBeLessThan(100);
+ expect(tokens).toBeGreaterThan(0);
+ });
+
+ test('should handle repeated tokenization calls efficiently', () => {
+ const choice: LLMChoice = { claude: 'claude-3-5-sonnet' };
+ const api = models(choice);
+
+ const testText = 'This is a performance test for repeated tokenization calls. ';
+ const iterations = 1000;
+
+ const startTime = performance.now();
+ for (let i = 0; i < iterations; i++) {
+ api.tokenizer(testText + i);
+ }
+ const endTime = performance.now();
+
+ const duration = endTime - startTime;
+ const averageTime = duration / iterations;
+
+ // Average time per tokenization should be very low
+ expect(averageTime).toBeLessThan(1); // Less than 1ms per call
+ expect(duration).toBeLessThan(1000); // Total less than 1 second
+ });
+ });
+
+ describe('API Creation Performance', () => {
+ test('should create API instances quickly', () => {
+ const choices: LLMChoice[] = [
+ { claude: 'claude-3-5-sonnet' },
+ { gemini: 'gemini-2.5-pro' },
+ { chatgpt: 'gpt-3.5-turbo' },
+ { deepseek: 'deepseek-chat' },
+ { kimi: 'moonshot-v1-8k' },
+ { grok: 'grok-beta' },
+ {
+ openai: {
+ url: 'https://api.openai.com/v1',
+ apiKey: 'test-key',
+ model: 'gpt-3.5-turbo'
+ }
+ }
+ ];
+
+ const iterations = 100;
+
+ const startTime = performance.now();
+ for (let i = 0; i < iterations; i++) {
+ const choice = choices[i % choices.length];
+ models(choice);
+ }
+ const endTime = performance.now();
+
+ const duration = endTime - startTime;
+ const averageTime = duration / iterations;
+
+ // API creation should be fast
+ expect(averageTime).toBeLessThan(10); // Less than 10ms per instance
+ expect(duration).toBeLessThan(1000); // Total less than 1 second
+ });
+ });
+
+ describe('Memory Usage', () => {
+ test('should not leak memory with repeated API creation', () => {
+ const initialMemory = process.memoryUsage();
+
+ // Create many API instances
+ for (let i = 0; i < 1000; i++) {
+ const choice: LLMChoice = { claude: 'claude-3-5-sonnet' };
+ const api = models(choice);
+ // Use the API briefly
+ api.tokenizer('test');
+ }
+
+      // Force garbage collection if available: Bun exposes Bun.gc(force),
+      // while global.gc requires starting the runtime with --expose-gc
+      if (typeof Bun !== 'undefined' && typeof Bun.gc === 'function') {
+        Bun.gc(true);
+      } else if (typeof global.gc === 'function') {
+        global.gc();
+      }
+
+ const finalMemory = process.memoryUsage();
+ const memoryIncrease = finalMemory.heapUsed - initialMemory.heapUsed;
+
+ // Memory increase should be reasonable (less than 50MB)
+ expect(memoryIncrease).toBeLessThan(50 * 1024 * 1024);
+ });
+
+ test('should handle large token arrays efficiently', () => {
+ const choice: LLMChoice = { claude: 'claude-3-5-sonnet' };
+ const api = models(choice);
+
+ // Create a large input array
+ const largeInput: InputToken[] = [];
+ for (let i = 0; i < 1000; i++) {
+ largeInput.push({ text: `Token number ${i}. ` });
+ }
+
+ const startTime = performance.now();
+ // Just test that it doesn't crash or take too long
+ expect(() => {
+        // Chain a catch so a rejection (no real credentials here) is not unhandled
+        const promise = api.send(largeInput).catch(() => null);
+ expect(promise).toBeDefined();
+ }).not.toThrow();
+ const endTime = performance.now();
+
+ // Should handle large inputs quickly
+ expect(endTime - startTime).toBeLessThan(100);
+ });
+ });
+});
+
+describe('Edge Cases', () => {
+ describe('Input Validation', () => {
+ test('should handle empty string inputs', async () => {
+ const choice: LLMChoice = { claude: 'claude-3-5-sonnet' };
+ const api = models(choice);
+
+      const promise = api.send('').catch(() => null); // avoid unhandled rejection
+ expect(promise).toBeDefined();
+ expect(typeof promise.then).toBe('function');
+ });
+
+ test('should handle whitespace-only inputs', async () => {
+ const choice: LLMChoice = { claude: 'claude-3-5-sonnet' };
+ const api = models(choice);
+
+ const whitespaceInputs = [' ', '\n\t', ' \n \t ', '\r\n'];
+
+ for (const input of whitespaceInputs) {
+        const promise = api.send(input).catch(() => null);
+ expect(promise).toBeDefined();
+ expect(typeof promise.then).toBe('function');
+ }
+ });
+
+ test('should handle very long single words', async () => {
+ const choice: LLMChoice = { claude: 'claude-3-5-sonnet' };
+ const api = models(choice);
+
+ const longWord = 'a'.repeat(10000);
+      const promise = api.send(longWord).catch(() => null);
+ expect(promise).toBeDefined();
+ expect(typeof promise.then).toBe('function');
+ });
+
+ test('should handle special characters and Unicode', async () => {
+ const choice: LLMChoice = { claude: 'claude-3-5-sonnet' };
+ const api = models(choice);
+
+ const specialInputs = [
+ 'Hello 🌍! 🚀 🎉',
+ '测试中文输入',
+ 'Тест на русском',
+ 'العربية اختبار',
+ '🔥💯🚀💪🎯',
+ 'Special chars: !@#$%^&*()_+-=[]{}|;:,.<>?',
+ 'Emoji string: 😊😂❤️🎉🤔😴🙄'
+ ];
+
+ for (const input of specialInputs) {
+        const promise = api.send(input).catch(() => null);
+ expect(promise).toBeDefined();
+ expect(typeof promise.then).toBe('function');
+ }
+ });
+
+ test('should handle malformed image data', async () => {
+ const choice: LLMChoice = { claude: 'claude-3-5-sonnet' };
+ const api = models(choice);
+
+ const malformedImages = [
+ { img: 'not-a-valid-image' },
+ { img: 'data:image/png;base64,' }, // Empty base64
+ { img: 'data:invalid-format;base64,abc123' },
+ { img: '' } // Empty image
+ ];
+
+ for (const imgToken of malformedImages) {
+ const input: InputToken[] = [
+ { text: 'Describe this:' },
+ imgToken
+ ];
+        const promise = api.send(input).catch(() => null);
+ expect(promise).toBeDefined();
+ expect(typeof promise.then).toBe('function');
+ }
+ });
+
+ test('should handle empty input arrays', async () => {
+ const choice: LLMChoice = { claude: 'claude-3-5-sonnet' };
+ const api = models(choice);
+
+ const emptyInput: InputToken[] = [];
+      const promise = api.send(emptyInput).catch(() => null);
+ expect(promise).toBeDefined();
+ expect(typeof promise.then).toBe('function');
+ });
+
+ test('should handle arrays with empty tokens', async () => {
+ const choice: LLMChoice = { claude: 'claude-3-5-sonnet' };
+ const api = models(choice);
+
+ const inputsWithEmptyTokens: InputToken[][] = [
+ [{ text: '' }],
+ [{ text: 'Hello' }, { text: '' }],
+ [{ text: '' }, { text: 'World' }],
+ [{ text: 'Hello' }, { text: '' }, { text: 'World' }]
+ ];
+
+ for (const input of inputsWithEmptyTokens) {
+        const promise = api.send(input).catch(() => null);
+ expect(promise).toBeDefined();
+ expect(typeof promise.then).toBe('function');
+ }
+ });
+ });
+
+ describe('Configuration Edge Cases', () => {
+ test('should handle minimal configuration', () => {
+ const choices: LLMChoice[] = [
+ { claude: '' },
+ { gemini: '' },
+ { chatgpt: '' },
+ { deepseek: '' },
+ { kimi: '' },
+ { grok: '' }
+ ];
+
+ choices.forEach(choice => {
+ expect(() => {
+ const api = models(choice);
+ expect(api).toBeDefined();
+ }).not.toThrow();
+ });
+ });
+
+ test('should handle very long model names', () => {
+ const longModelName = 'model-name-that-is-extremely-long-and-unrealistic-but-should-still-work-without-crashing'.repeat(10);
+ const choice: LLMChoice = { claude: longModelName };
+
+ expect(() => {
+ const api = models(choice);
+ expect(api).toBeDefined();
+ }).not.toThrow();
+ });
+
+ test('should handle OpenAI configuration with minimal required fields', () => {
+ const minimalConfigs = [
+ {
+ openai: {
+ url: 'https://api.openai.com/v1',
+ apiKey: 'test-key',
+ model: 'gpt-3.5-turbo'
+ }
+ },
+ {
+ openai: {
+ url: 'https://api.openai.com/v1',
+ apiKey: 'test-key',
+ model: 'gpt-3.5-turbo',
+ allowBrowser: false
+ }
+ },
+ {
+ openai: {
+ url: 'https://api.openai.com/v1',
+ apiKey: 'test-key',
+ model: 'gpt-3.5-turbo',
+ allowBrowser: true
+ }
+ }
+ ];
+
+ minimalConfigs.forEach(config => {
+ expect(() => {
+ const api = models(config);
+ expect(api).toBeDefined();
+ }).not.toThrow();
+ });
+ });
+
+ test('should handle malformed URLs gracefully', () => {
+ const malformedUrls = [
+ 'not-a-url',
+ 'http://',
+ 'https://',
+ 'ftp://example.com',
+ 'javascript:alert(1)',
+ 'file:///etc/passwd'
+ ];
+
+ malformedUrls.forEach(url => {
+ const config: LLMChoice = {
+ openai: {
+ url: url,
+ apiKey: 'test-key',
+ model: 'gpt-3.5-turbo'
+ }
+ };
+
+ expect(() => {
+ const api = models(config);
+ expect(api).toBeDefined();
+ }).not.toThrow();
+ });
+ });
+ });
+
+ describe('Concurrent Operations', () => {
+ test('should handle multiple simultaneous API calls', async () => {
+ const choice: LLMChoice = { claude: 'claude-3-5-sonnet' };
+ const api = models(choice);
+
+ const promises = [];
+ for (let i = 0; i < 50; i++) {
+ promises.push(api.send(`Test message ${i}`).catch(() => `Error ${i}`));
+ }
+
+ const results = await Promise.allSettled(promises);
+ expect(results.length).toBe(50);
+
+ // All promises should be settled (either fulfilled or rejected)
+ results.forEach(result => {
+ expect(result.status).toBeOneOf(['fulfilled', 'rejected']);
+ });
+ });
+
+ test('should handle multiple simultaneous streams', () => {
+ const choice: LLMChoice = { claude: 'claude-3-5-sonnet' };
+ const api = models(choice);
+
+ expect(() => {
+ for (let i = 0; i < 10; i++) {
+ const handler = (data: string) => console.log(`Stream ${i}:`, data);
+ api.stream(`Message ${i}`, handler);
+ }
+ }).not.toThrow();
+ });
+
+ test('should handle mixing streaming and non-streaming calls', async () => {
+ const choice: LLMChoice = { claude: 'claude-3-5-sonnet' };
+ const api = models(choice);
+
+ // Start some streams
+ for (let i = 0; i < 5; i++) {
+ const handler = (data: string) => console.log(`Stream ${i}:`, data);
+ api.stream(`Stream message ${i}`, handler);
+ }
+
+ // Also make some regular calls
+ const promises = [];
+ for (let i = 0; i < 5; i++) {
+ promises.push(api.send(`Regular message ${i}`).catch(() => null));
+ }
+
+ const results = await Promise.allSettled(promises);
+ expect(results.length).toBe(5);
+ });
+ });
+
+ describe('Resource Management', () => {
+ test('should handle rapid model switching', () => {
+ const choice: LLMChoice = { claude: 'claude-3-5-sonnet' };
+ const api = models(choice);
+
+      // Named modelNames to avoid a temporal-dead-zone clash with the imported `models` factory
+      const modelNames = ['claude-3-5-sonnet', 'claude-3-haiku', 'claude-3-opus'];
+ const iterations = 100;
+
+ expect(() => {
+ for (let i = 0; i < iterations; i++) {
+          const model = modelNames[i % modelNames.length];
+ api.setModel(model);
+ }
+ }).not.toThrow();
+
+ // Should still be functional
+ expect(() => api.tokenizer('test')).not.toThrow();
+ });
+
+ test('should handle system prompt changes efficiently', async () => {
+ const choice: LLMChoice = { claude: 'claude-3-5-sonnet' };
+ const api = models(choice);
+
+ const systemPrompts = [
+ 'You are a helpful assistant.',
+ 'You are a creative writer.',
+ 'You are a technical expert.',
+ 'You are a friendly chatbot.'
+ ];
+
+ const promises = systemPrompts.map(prompt =>
+ api.send('Hello', prompt).catch(() => null)
+ );
+
+ const results = await Promise.allSettled(promises);
+ expect(results.length).toBe(systemPrompts.length);
+ });
+ });
+
+ describe('Extreme Cases', () => {
+ test('should handle extremely large system prompts', async () => {
+ const choice: LLMChoice = { claude: 'claude-3-5-sonnet' };
+ const api = models(choice);
+
+ const largeSystemPrompt = 'You are a helpful assistant. '.repeat(10000);
+ expect(largeSystemPrompt.length).toBeGreaterThan(200000);
+
+      const promise = api.send('Hello', largeSystemPrompt).catch(() => null);
+ expect(promise).toBeDefined();
+ expect(typeof promise.then).toBe('function');
+ });
+
+ test('should handle deep nesting of input tokens', async () => {
+ const choice: LLMChoice = { claude: 'claude-3-5-sonnet' };
+ const api = models(choice);
+
+ // Create a very deep input array
+ const deepInput: InputToken[] = [];
+ for (let i = 0; i < 10000; i++) {
+ deepInput.push({ text: `Item ${i}: ` });
+ }
+
+ const startTime = performance.now();
+      const promise = api.send(deepInput).catch(() => null);
+ const endTime = performance.now();
+
+ expect(promise).toBeDefined();
+ expect(endTime - startTime).toBeLessThan(1000); // Should be fast
+ });
+
+ test('should handle rapid creation and destruction', () => {
+ const startTime = performance.now();
+
+ for (let i = 0; i < 1000; i++) {
+ const choice: LLMChoice = { claude: 'claude-3-5-sonnet' };
+ const api = models(choice);
+ // Use it briefly
+ api.tokenizer(`Test ${i}`);
+ // Let it go out of scope
+ }
+
+ const endTime = performance.now();
+ expect(endTime - startTime).toBeLessThan(5000); // Should complete in 5 seconds
+ });
+ });
+});
\ No newline at end of file
diff --git a/tests/setup.ts b/tests/setup.ts
new file mode 100644
index 0000000..a8a7546
--- /dev/null
+++ b/tests/setup.ts
@@ -0,0 +1,180 @@
+/**
+ * Test configuration and setup utilities
+ * This file provides configuration for running tests with different LLM providers
+ */
+
+// Environment variable configuration
+export interface TestConfig {
+ baseUrl: string;
+ apiKey: string;
+ model: string;
+ claudeApiKey?: string;
+ openaiApiKey?: string;
+ geminiApiKey?: string;
+}
+
+// Get test configuration from environment variables
+export function getTestConfig(): TestConfig {
+ return {
+ baseUrl: Bun.env.ZAI_BASE_URL || 'https://api.openai.com/v1',
+ apiKey: Bun.env.ZAI_API_KEY || 'test-api-key',
+ model: Bun.env.TEST_MODEL || 'glm-4.6',
+    // The router reads ANTHROPIC_API_KEY / GOOGLE_API_KEY, so accept those
+    // alongside the test-specific names
+    claudeApiKey: Bun.env.ANTHROPIC_API_KEY || Bun.env.CLAUDE_API_KEY,
+    openaiApiKey: Bun.env.OPENAI_API_KEY,
+    geminiApiKey: Bun.env.GOOGLE_API_KEY || Bun.env.GEMINI_API_KEY
+ };
+}
+
+// Check if we have real API credentials for integration testing
+export function hasIntegrationCredentials(): {
+ claude: boolean;
+ openai: boolean;
+ gemini: boolean;
+ any: boolean;
+} {
+ const config = getTestConfig();
+
+ return {
+ claude: !!config.claudeApiKey,
+ openai: !!config.openaiApiKey,
+ gemini: !!config.geminiApiKey,
+ any: !!(config.claudeApiKey || config.openaiApiKey || config.geminiApiKey)
+ };
+}
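+// Typical gate for integration suites (a sketch; adapt as needed):
+//   const creds = hasIntegrationCredentials();
+//   const itClaude = creds.claude ? test : test.skip;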
+
+// Mock API responses for testing without real credentials
+export const mockResponses = {
+ claude: {
+ success: 'SUCCESS',
+ counting: '1\n2\n3\n4\n5',
+ error: 'Mock Claude error'
+ },
+ openai: {
+ success: 'SUCCESS',
+ error: 'Mock OpenAI error'
+ },
+ gemini: {
+ success: 'SUCCESS',
+ error: 'Mock Gemini error'
+ }
+};
+
+// Test data generators
+export const testData = {
+ simpleText: () => 'Hello, this is a simple test message.',
+ longText: () => 'This is a longer test message. '.repeat(100),
+ unicodeText: () => 'Hello 🌍! 测试中文! Тест на русском! العربية!',
+ specialChars: () => '!@#$%^&*()_+-=[]{}|;:,.<>?/~`',
+ emptyString: () => '',
+ whitespaceOnly: () => ' \n\t ',
+
+ // Input token generators
+ textTokens: (count: number = 3) =>
+ Array.from({ length: count }, (_, i) => ({ text: `Token ${i + 1}: ` })),
+
+ imageTokens: (count: number = 1) =>
+ Array.from({ length: count }, () => ({
+ img: 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mNkYPhfDwAChwGA60e6kgAAAABJRU5ErkJggg=='
+ })),
+
+ mixedTokens: () => [
+ { text: 'Describe this image: ' },
+ { img: 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mNkYPhfDwAChwGA60e6kgAAAABJRU5ErkJggg==' }
+ ]
+};
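+// Illustrative use with the router from ../index:
+//   const api = models({ claude: 'claude-3-5-sonnet' });
+//   await api.send(testData.mixedTokens());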
+
+// Performance test utilities.
+// This export shadows the global `performance` binding inside this module,
+// so the helpers reach the real timer via globalThis.
+export const performance = {
+  measureTokenization: (api: any, text: string, iterations: number = 1000) => {
+    const start = globalThis.performance.now();
+    for (let i = 0; i < iterations; i++) {
+      api.tokenizer(text + i);
+    }
+    const end = globalThis.performance.now();
+    return {
+      totalTime: end - start,
+      averageTime: (end - start) / iterations,
+      iterations
+    };
+  },
+
+  measureApiCreation: (choice: any, iterations: number = 100) => {
+    // Resolve the factory once so module loading is not part of the measurement
+    const models = require('../index').default;
+    const start = globalThis.performance.now();
+    for (let i = 0; i < iterations; i++) {
+      models(choice);
+    }
+    const end = globalThis.performance.now();
+    return {
+      totalTime: end - start,
+      averageTime: (end - start) / iterations,
+      iterations
+    };
+  }
+};
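+// Example (numbers are illustrative):
+//   const { averageTime } = performance.measureTokenization(api, 'hello', 500);
+//   testLogger.info(`${averageTime.toFixed(3)} ms per tokenizer call`);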
+
+// Error simulation utilities
+export const errorSimulator = {
+ // Simulate network errors
+ networkError: () => new Error('Network connection failed'),
+
+ // Simulate API key errors
+ apiKeyError: () => new Error('Invalid API key'),
+
+ // Simulate timeout errors
+ timeoutError: () => new Error('Request timeout'),
+
+ // Simulate rate limit errors
+ rateLimitError: () => new Error('Rate limit exceeded'),
+
+ // Simulate invalid response errors
+ invalidResponseError: () => new Error('Invalid API response format')
+};
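+// These factories return plain Error objects; a test can simulate a provider
+// failure by rejecting with one:
+//   Promise.reject(errorSimulator.rateLimitError())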
+
+// Logging utilities for tests
+export const testLogger = {
+ info: (message: string, ...args: any[]) => {
+ console.log(`[TEST-INFO] ${message}`, ...args);
+ },
+
+ warn: (message: string, ...args: any[]) => {
+ console.warn(`[TEST-WARN] ${message}`, ...args);
+ },
+
+ error: (message: string, ...args: any[]) => {
+ console.error(`[TEST-ERROR] ${message}`, ...args);
+ },
+
+ debug: (message: string, ...args: any[]) => {
+ if (Bun.env.DEBUG_TESTS === 'true') {
+ console.debug(`[TEST-DEBUG] ${message}`, ...args);
+ }
+ }
+};
+
+// Cleanup utilities
+export const cleanup = {
+ // Clear environment variables after tests
+ clearEnv: (...vars: string[]) => {
+ vars.forEach(varName => {
+ delete Bun.env[varName];
+ });
+ },
+
+ // Reset global state if needed
+ resetGlobal: () => {
+ // Add any global state cleanup here
+ }
+};
+
+// Export a default test setup object
+export default {
+ getTestConfig,
+ hasIntegrationCredentials,
+ mockResponses,
+ testData,
+ performance,
+ errorSimulator,
+ testLogger,
+ cleanup
+};
\ No newline at end of file
diff --git a/tests/test.ts b/tests/test.ts
deleted file mode 100644
index f962364..0000000
--- a/tests/test.ts
+++ /dev/null
@@ -1,25 +0,0 @@
-import Router from "..";
-// import OpenAI from "openai";
-
-async function run() {
- const api = Router({ kimi: "" });
- // const api = new OpenAI({
- // baseURL: "https://api.moonshot.ai/v1",
- // apiKey: Bun.env.MOONSHOT_API_KEY,
- // });
- // const model = "kimi-k2-0711-preview";
- // const api = new OpenAI();
- // const model = "o4-mini";
- // const res = await api.responses.create({ model, input: "Hello!" });
- // const res = await api.chat.completions.create({
- // model,
- // messages: [{ role: "user", content: "Hello there" }],
- // });
- const res = await api.send("henlo");
- console.log({ res });
-}
-run();
-
-// gemini works
-// claude works too.
-// it's the openai API thingy