summaryrefslogtreecommitdiff
path: root/tests/example.ts
blob: 568f5cefb3a05ed6b415f95e4d8d61dec0f36e08 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
/**
 * Example usage of the models library
 * This file demonstrates how to use the LLM routing library with different providers
 */

import models, { AIModelAPI, LLMChoice } from '../index';
import OpenAIResponses from '../src/openai-responses';

// Example configurations for different providers.
// Each entry pairs a human-readable description with:
//   - setup(): returns either an LLMChoice (to be routed through models())
//     or, for `openaiResponses`, a ready-made AIModelAPI instance — callers
//     must check which one they got (see runExample's 'send' in ... check).
//   - envVars: environment variables the example reads; missing ones are
//     warned about but do not prevent instance creation.
// NOTE: property order matters — runners iterate with Object.keys(examples).
const examples = {
  // Claude example
  claude: {
    description: 'Claude (Anthropic) example',
    setup: (): LLMChoice => ({ claude: 'claude-3-5-sonnet' }),
    envVars: ['ANTHROPIC_API_KEY']
  },

  // Gemini example
  gemini: {
    description: 'Gemini (Google) example',
    setup: (): LLMChoice => ({ gemini: 'gemini-2.5-pro' }),
    envVars: ['GOOGLE_API_KEY']
  },

  // ChatGPT example (using new OpenAI Responses API)
  chatgpt: {
    description: 'ChatGPT (OpenAI Responses API) example',
    setup: (): LLMChoice => ({ chatgpt: 'gpt-4o' }),
    envVars: ['OPENAI_API_KEY']
  },

  // Direct OpenAI Responses API example — unlike the others, setup() here
  // constructs the provider class directly instead of returning an LLMChoice.
  openaiResponses: {
    description: 'Direct OpenAI Responses API example',
    setup: (): AIModelAPI => new OpenAIResponses({
      baseURL: Bun.env.ZAI_BASE_URL || 'https://api.openai.com/v1',
      apiKey: Bun.env.ZAI_API_KEY || Bun.env.OPENAI_API_KEY || 'your-api-key-here',
      model: Bun.env.TEST_MODEL || 'gpt-4o',
      allowBrowser: true,
      tokenizer: (text: string) => text.length / 4, // Custom tokenizer (rough chars/4 estimate)
      maxTokens: 4000 // Custom max tokens
    }),
    envVars: ['ZAI_BASE_URL', 'ZAI_API_KEY', 'OPENAI_API_KEY', 'TEST_MODEL']
  },

  // DeepSeek example
  deepseek: {
    description: 'DeepSeek example',
    setup: (): LLMChoice => ({ deepseek: 'deepseek-chat' }),
    envVars: ['DEEPSEEK_API_KEY']
  },

  // Kimi example
  kimi: {
    description: 'Kimi (Moonshot) example',
    setup: (): LLMChoice => ({ kimi: 'moonshot-v1-8k' }),
    envVars: ['MOONSHOT_API_KEY']
  },

  // Grok example
  grok: {
    description: 'Grok (X.AI) example',
    setup: (): LLMChoice => ({ grok: 'grok-beta' }),
    envVars: ['XAI_API_KEY']
  },

  // Custom OpenAI-compatible API example — routes through models() with an
  // explicit endpoint configuration rather than a provider shorthand.
  custom: {
    description: 'Custom OpenAI-compatible API example',
    setup: (): LLMChoice => ({
      openai: {
        url: Bun.env.ZAI_BASE_URL || 'https://api.openai.com/v1',
        apiKey: Bun.env.ZAI_API_KEY || 'your-api-key-here',
        model: Bun.env.TEST_MODEL || 'glm-4.6',
        allowBrowser: true
      }
    }),
    envVars: ['ZAI_BASE_URL', 'ZAI_API_KEY', 'TEST_MODEL']
  }
};

// Run an example with a specific provider.
// Creates the API instance (via models() or directly), then exercises
// tokenization, a simple send() call, and streaming. API/stream failures are
// reported but never thrown — this is a demo, not a test suite.
async function runExample(providerName: keyof typeof examples) {
  // Catch variables are `unknown` under strict TS; narrow before reading .message.
  const errMsg = (e: unknown): string => (e instanceof Error ? e.message : String(e));

  const example = examples[providerName];

  console.log(`\n=== ${example.description} ===`);

  // Check required environment variables
  const missingVars = example.envVars.filter(varName => !Bun.env[varName]);
  if (missingVars.length > 0) {
    console.warn(`Warning: Missing environment variables: ${missingVars.join(', ')}`);
    console.warn('The example will be created but API calls will likely fail.');
  }

  try {
    // Create the API instance
    const setupResult = example.setup();
    let api: AIModelAPI;
    let configInfo: any;

    // Check if the setup returns an LLMChoice or direct AIModelAPI instance.
    // A ready-made API instance exposes `send`; an LLMChoice does not.
    if (typeof setupResult === 'object' && 'send' in setupResult) {
      // Direct AIModelAPI instance
      api = setupResult;
      configInfo = 'Direct API instance';
    } else {
      // LLMChoice that needs to be passed to models()
      const choice = setupResult as LLMChoice;
      api = models(choice);
      configInfo = choice;
    }

    console.log(`✅ API instance created successfully`);
    console.log(`   Max tokens: ${api.maxTokens}`);
    console.log(`   Model: ${JSON.stringify(configInfo)}`);

    // Check if it's an OpenAI Responses API instance
    if (api instanceof OpenAIResponses) {
      console.log(`   Using OpenAI Responses API directly`);
    }

    // Test tokenization
    const testText = 'Hello, how are you today?';
    const tokens = api.tokenizer(testText);
    console.log(`   Tokenization: "${testText}" -> ${tokens} tokens`);

    // Test simple API call (will fail without valid credentials)
    console.log(`   Testing API call...`);
    try {
      const response = await api.send('Hello! Please respond with just "API working".');
      console.log(`   ✅ API response: ${response.substring(0, 100)}${response.length > 100 ? '...' : ''}`);
    } catch (error) {
      console.log(`   ❌ API call failed: ${errMsg(error)}`);
    }

    // Test streaming. We collect chunks for a fixed 3s window and then move
    // on — the stream is not awaited to completion, by design for this demo.
    console.log(`   Testing streaming...`);
    try {
      const chunks: string[] = [];
      const handler = (chunk: string) => {
        chunks.push(chunk);
        process.stdout.write('.');
      };

      await new Promise<void>((resolve) => {
        api.stream('Count from 1 to 3', handler);
        setTimeout(() => {
          console.log(`\n   ✅ Streaming completed (${chunks.length} chunks)`);
          resolve();
        }, 3000);
      });
    } catch (error) {
      // NOTE(review): only synchronous throws from api.stream() land here;
      // an async rejection inside stream() would be unhandled — confirm
      // stream's contract if that matters.
      console.log(`   ❌ Streaming failed: ${errMsg(error)}`);
    }

  } catch (error) {
    console.error(`❌ Failed to create API instance: ${errMsg(error)}`);
  }
}

// Run every provider example in declaration order, after summarizing which
// relevant environment variables are set (values are never printed).
async function runAllExamples() {
  console.log('🚀 Running LLM Models Library Examples\n');

  console.log('Environment Variables:');
  const isRelevant = (key: string) =>
    key.includes('API_KEY') || key.includes('BASE_URL') || key.includes('MODEL');
  for (const key of Object.keys(Bun.env).filter(isRelevant)) {
    console.log(`   ${key}: ${Bun.env[key] ? '***set***' : 'not set'}`);
  }

  const providers = Object.keys(examples) as (keyof typeof examples)[];
  for (const providerName of providers) {
    await runExample(providerName);
  }

  console.log('\n✨ All examples completed!');
}

// Interactive example selector
async function interactiveExample() {
  console.log('\n📋 Available providers:');
  Object.entries(examples).forEach(([key, example]) => {
    console.log(`   ${key}: ${example.description}`);
  });

  const provider = process.argv[2];
  if (provider && provider in examples) {
    await runExample(provider as keyof typeof examples);
  } else {
    console.log('\nUsage: bun run example.ts [provider]');
    console.log('Available providers:', Object.keys(examples).join(', '));
    console.log('Or run without arguments to see all examples');
    await runAllExamples();
  }
}

// Test tokenization accuracy across providers
async function testTokenization() {
  console.log('\n🔢 Testing Tokenization Across Providers\n');

  const testTexts = [
    'Hello world',
    'The quick brown fox jumps over the lazy dog.',
    'This is a longer text with multiple sentences. It should have more tokens than shorter texts.',
    'Special chars: !@#$%^&*()_+-=[]{}|;:,.<>?/~`',
    'Unicode: Hello 🌍! 测试中文! Тест на русском! العربية!'
  ];

  for (const providerName of Object.keys(examples) as (keyof typeof examples)[]) {
    try {
      const example = examples[providerName];
      const choice = example.setup();
      const api = models(choice);

      console.log(`${providerName}:`);
      testTexts.forEach(text => {
        const tokens = api.tokenizer(text);
        console.log(`   "${text.substring(0, 50)}${text.length > 50 ? '...' : ''}" -> ${tokens} tokens`);
      });
      console.log('');
    } catch (error) {
      console.log(`${providerName}: Failed to initialize - ${error.message}\n`);
    }
  }
}

// Performance benchmark
async function performanceBenchmark() {
  console.log('\n⚡ Performance Benchmark\n');

  const testText = 'The quick brown fox jumps over the lazy dog. ';
  const iterations = 1000;

  for (const providerName of Object.keys(examples) as (keyof typeof examples)[]) {
    try {
      const example = examples[providerName];
      const choice = example.setup();
      const api = models(choice);

      const startTime = performance.now();
      for (let i = 0; i < iterations; i++) {
        api.tokenizer(testText + i);
      }
      const endTime = performance.now();

      const totalTime = endTime - startTime;
      const avgTime = totalTime / iterations;

      console.log(`${providerName}:`);
      console.log(`   ${iterations} tokenizations in ${totalTime.toFixed(2)}ms`);
      console.log(`   Average: ${avgTime.toFixed(3)}ms per tokenization`);
      console.log('');
    } catch (error) {
      console.log(`${providerName}: Failed to benchmark - ${error.message}\n`);
    }
  }
}

// Main execution: dispatch on the first CLI argument via a lookup table;
// anything unrecognized (or no argument) falls through to the interactive flow.
if (import.meta.main) {
  const command = process.argv[2];

  const handlers: Record<string, () => Promise<void>> = {
    tokenize: testTokenization,
    benchmark: performanceBenchmark,
    all: runAllExamples,
  };

  await (handlers[command ?? ''] ?? interactiveExample)();
}

// Re-export the example catalog and runners for use from other scripts/tests.
export { examples, runExample, runAllExamples, testTokenization, performanceBenchmark };