summaryrefslogtreecommitdiff
path: root/tests/example.ts
diff options
context:
space:
mode:
authorpolwex <polwex@sortug.com>2025-10-19 12:54:25 +0700
committerpolwex <polwex@sortug.com>2025-10-19 12:54:25 +0700
commit8815d3c1d40550470c5bc972bc16bd4966735154 (patch)
tree92ef606b568035b9e88d89286be3330f4b84af1e /tests/example.ts
parentba16ebcbe36c1a1cbdb1d1379cb3f9c3a086acdf (diff)
new openai responses api and some claude made testsHEADmaster
Diffstat (limited to 'tests/example.ts')
-rw-r--r--tests/example.ts279
1 files changed, 279 insertions, 0 deletions
diff --git a/tests/example.ts b/tests/example.ts
new file mode 100644
index 0000000..568f5ce
--- /dev/null
+++ b/tests/example.ts
@@ -0,0 +1,279 @@
+/**
+ * Example usage of the models library
+ * This file demonstrates how to use the LLM routing library with different providers
+ */
+
+import models, { AIModelAPI, LLMChoice } from '../index';
+import OpenAIResponses from '../src/openai-responses';
+
+// Example configurations for different providers
// Registry of example configurations, keyed by provider name.
// Each entry supplies:
//   - description: human-readable label printed before the run
//   - setup(): either an LLMChoice descriptor (to be routed through models())
//     or — for `openaiResponses` only — a ready-made AIModelAPI instance
//   - envVars: environment variables the provider reads; missing ones only
//     produce a warning, the instance is still constructed
const examples = {
  // Claude example
  claude: {
    description: 'Claude (Anthropic) example',
    setup: (): LLMChoice => ({ claude: 'claude-3-5-sonnet' }),
    envVars: ['ANTHROPIC_API_KEY']
  },

  // Gemini example
  gemini: {
    description: 'Gemini (Google) example',
    setup: (): LLMChoice => ({ gemini: 'gemini-2.5-pro' }),
    envVars: ['GOOGLE_API_KEY']
  },

  // ChatGPT example (using new OpenAI Responses API)
  chatgpt: {
    description: 'ChatGPT (OpenAI Responses API) example',
    setup: (): LLMChoice => ({ chatgpt: 'gpt-4o' }),
    envVars: ['OPENAI_API_KEY']
  },

  // Direct OpenAI Responses API example — NOTE: unlike the other entries,
  // setup() here returns an AIModelAPI instance, not an LLMChoice.
  openaiResponses: {
    description: 'Direct OpenAI Responses API example',
    setup: (): AIModelAPI => new OpenAIResponses({
      // ZAI_* variables take precedence so the example can target an
      // OpenAI-compatible gateway; falls back to the public endpoint.
      baseURL: Bun.env.ZAI_BASE_URL || 'https://api.openai.com/v1',
      apiKey: Bun.env.ZAI_API_KEY || Bun.env.OPENAI_API_KEY || 'your-api-key-here',
      model: Bun.env.TEST_MODEL || 'gpt-4o',
      allowBrowser: true,
      tokenizer: (text: string) => text.length / 4, // Custom tokenizer (rough chars/4 heuristic)
      maxTokens: 4000 // Custom max tokens
    }),
    envVars: ['ZAI_BASE_URL', 'ZAI_API_KEY', 'OPENAI_API_KEY', 'TEST_MODEL']
  },

  // DeepSeek example
  deepseek: {
    description: 'DeepSeek example',
    setup: (): LLMChoice => ({ deepseek: 'deepseek-chat' }),
    envVars: ['DEEPSEEK_API_KEY']
  },

  // Kimi example
  kimi: {
    description: 'Kimi (Moonshot) example',
    setup: (): LLMChoice => ({ kimi: 'moonshot-v1-8k' }),
    envVars: ['MOONSHOT_API_KEY']
  },

  // Grok example
  grok: {
    description: 'Grok (X.AI) example',
    setup: (): LLMChoice => ({ grok: 'grok-beta' }),
    envVars: ['XAI_API_KEY']
  },

  // Custom OpenAI-compatible API example (routed through the `openai` choice)
  custom: {
    description: 'Custom OpenAI-compatible API example',
    setup: (): LLMChoice => ({
      openai: {
        url: Bun.env.ZAI_BASE_URL || 'https://api.openai.com/v1',
        apiKey: Bun.env.ZAI_API_KEY || 'your-api-key-here',
        model: Bun.env.TEST_MODEL || 'glm-4.6',
        allowBrowser: true
      }
    }),
    envVars: ['ZAI_BASE_URL', 'ZAI_API_KEY', 'TEST_MODEL']
  }
};
+
+// Run an example with a specific provider
+async function runExample(providerName: keyof typeof examples) {
+ const example = examples[providerName];
+
+ console.log(`\n=== ${example.description} ===`);
+
+ // Check required environment variables
+ const missingVars = example.envVars.filter(varName => !Bun.env[varName]);
+ if (missingVars.length > 0) {
+ console.warn(`Warning: Missing environment variables: ${missingVars.join(', ')}`);
+ console.warn('The example will be created but API calls will likely fail.');
+ }
+
+ try {
+ // Create the API instance
+ const setupResult = example.setup();
+ let api: AIModelAPI;
+ let configInfo: any;
+
+ // Check if the setup returns an LLMChoice or direct AIModelAPI instance
+ if (typeof setupResult === 'object' && 'send' in setupResult) {
+ // Direct AIModelAPI instance
+ api = setupResult;
+ configInfo = 'Direct API instance';
+ } else {
+ // LLMChoice that needs to be passed to models()
+ const choice = setupResult as LLMChoice;
+ api = models(choice);
+ configInfo = choice;
+ }
+
+ console.log(`✅ API instance created successfully`);
+ console.log(` Max tokens: ${api.maxTokens}`);
+ console.log(` Model: ${JSON.stringify(configInfo)}`);
+
+ // Check if it's an OpenAI Responses API instance
+ if (api instanceof OpenAIResponses) {
+ console.log(` Using OpenAI Responses API directly`);
+ }
+
+ // Test tokenization
+ const testText = 'Hello, how are you today?';
+ const tokens = api.tokenizer(testText);
+ console.log(` Tokenization: "${testText}" -> ${tokens} tokens`);
+
+ // Test simple API call (will fail without valid credentials)
+ console.log(` Testing API call...`);
+ try {
+ const response = await api.send('Hello! Please respond with just "API working".');
+ console.log(` ✅ API response: ${response.substring(0, 100)}${response.length > 100 ? '...' : ''}`);
+ } catch (error) {
+ console.log(` ❌ API call failed: ${error.message}`);
+ }
+
+ // Test streaming
+ console.log(` Testing streaming...`);
+ try {
+ const chunks: string[] = [];
+ const handler = (chunk: string) => {
+ chunks.push(chunk);
+ process.stdout.write('.');
+ };
+
+ await new Promise<void>((resolve) => {
+ api.stream('Count from 1 to 3', handler);
+ setTimeout(() => {
+ console.log(`\n ✅ Streaming completed (${chunks.length} chunks)`);
+ resolve();
+ }, 3000);
+ });
+ } catch (error) {
+ console.log(` ❌ Streaming failed: ${error.message}`);
+ }
+
+ } catch (error) {
+ console.error(`❌ Failed to create API instance: ${error.message}`);
+ }
+}
+
+// Run all examples
+async function runAllExamples() {
+ console.log('🚀 Running LLM Models Library Examples\n');
+
+ console.log('Environment Variables:');
+ Object.keys(Bun.env)
+ .filter(key => key.includes('API_KEY') || key.includes('BASE_URL') || key.includes('MODEL'))
+ .forEach(key => {
+ const value = Bun.env[key];
+ console.log(` ${key}: ${value ? '***set***' : 'not set'}`);
+ });
+
+ for (const providerName of Object.keys(examples) as (keyof typeof examples)[]) {
+ await runExample(providerName);
+ }
+
+ console.log('\n✨ All examples completed!');
+}
+
+// Interactive example selector
+async function interactiveExample() {
+ console.log('\n📋 Available providers:');
+ Object.entries(examples).forEach(([key, example]) => {
+ console.log(` ${key}: ${example.description}`);
+ });
+
+ const provider = process.argv[2];
+ if (provider && provider in examples) {
+ await runExample(provider as keyof typeof examples);
+ } else {
+ console.log('\nUsage: bun run example.ts [provider]');
+ console.log('Available providers:', Object.keys(examples).join(', '));
+ console.log('Or run without arguments to see all examples');
+ await runAllExamples();
+ }
+}
+
+// Test tokenization accuracy across providers
+async function testTokenization() {
+ console.log('\n🔢 Testing Tokenization Across Providers\n');
+
+ const testTexts = [
+ 'Hello world',
+ 'The quick brown fox jumps over the lazy dog.',
+ 'This is a longer text with multiple sentences. It should have more tokens than shorter texts.',
+ 'Special chars: !@#$%^&*()_+-=[]{}|;:,.<>?/~`',
+ 'Unicode: Hello 🌍! 测试中文! Тест на русском! العربية!'
+ ];
+
+ for (const providerName of Object.keys(examples) as (keyof typeof examples)[]) {
+ try {
+ const example = examples[providerName];
+ const choice = example.setup();
+ const api = models(choice);
+
+ console.log(`${providerName}:`);
+ testTexts.forEach(text => {
+ const tokens = api.tokenizer(text);
+ console.log(` "${text.substring(0, 50)}${text.length > 50 ? '...' : ''}" -> ${tokens} tokens`);
+ });
+ console.log('');
+ } catch (error) {
+ console.log(`${providerName}: Failed to initialize - ${error.message}\n`);
+ }
+ }
+}
+
+// Performance benchmark
+async function performanceBenchmark() {
+ console.log('\n⚡ Performance Benchmark\n');
+
+ const testText = 'The quick brown fox jumps over the lazy dog. ';
+ const iterations = 1000;
+
+ for (const providerName of Object.keys(examples) as (keyof typeof examples)[]) {
+ try {
+ const example = examples[providerName];
+ const choice = example.setup();
+ const api = models(choice);
+
+ const startTime = performance.now();
+ for (let i = 0; i < iterations; i++) {
+ api.tokenizer(testText + i);
+ }
+ const endTime = performance.now();
+
+ const totalTime = endTime - startTime;
+ const avgTime = totalTime / iterations;
+
+ console.log(`${providerName}:`);
+ console.log(` ${iterations} tokenizations in ${totalTime.toFixed(2)}ms`);
+ console.log(` Average: ${avgTime.toFixed(3)}ms per tokenization`);
+ console.log('');
+ } catch (error) {
+ console.log(`${providerName}: Failed to benchmark - ${error.message}\n`);
+ }
+ }
+}
+
+// Main execution
+if (import.meta.main) {
+ const command = process.argv[2];
+
+ switch (command) {
+ case 'tokenize':
+ await testTokenization();
+ break;
+ case 'benchmark':
+ await performanceBenchmark();
+ break;
+ case 'all':
+ await runAllExamples();
+ break;
+ default:
+ await interactiveExample();
+ }
+}
+
+export { examples, runExample, runAllExamples, testTokenization, performanceBenchmark }; \ No newline at end of file