Diffstat (limited to 'tests/performance.test.ts')
-rw-r--r--  tests/performance.test.ts  465
1 file changed, 465 insertions, 0 deletions
diff --git a/tests/performance.test.ts b/tests/performance.test.ts
new file mode 100644
index 0000000..59c98f5
--- /dev/null
+++ b/tests/performance.test.ts
@@ -0,0 +1,465 @@
+import { describe, test, expect } from 'bun:test';
+import models, { LLMChoice, InputToken } from '../index';
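+
+// Strategy note: these tests exercise the synchronous surface of the
+// library (instance creation, tokenizer, setModel) or assert only that
+// send()/stream() hand back a promise without throwing synchronously.
+// No network response is ever awaited, so the suite runs without real
+// provider credentials.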
+
+describe('Performance Tests', () => {
+ describe('Tokenization Performance', () => {
+ test('should tokenize large texts efficiently', () => {
+ const choice: LLMChoice = { claude: 'claude-3-5-sonnet' };
+ const api = models(choice);
+
+ // Generate a large text (~112KB: 45 chars x 2500 repeats, so the length check below actually passes)
+ const largeText = 'The quick brown fox jumps over the lazy dog. '.repeat(2500);
+ expect(largeText.length).toBeGreaterThan(100000);
+
+ const startTime = performance.now();
+ const tokens = api.tokenizer(largeText);
+ const endTime = performance.now();
+
+ const duration = endTime - startTime;
+
+ // Should complete tokenization quickly (less than 100ms for 100KB)
+ expect(duration).toBeLessThan(100);
+ expect(tokens).toBeGreaterThan(0);
+ });
+
+ test('should handle repeated tokenization calls efficiently', () => {
+ const choice: LLMChoice = { claude: 'claude-3-5-sonnet' };
+ const api = models(choice);
+
+ const testText = 'This is a performance test for repeated tokenization calls. ';
+ const iterations = 1000;
+
+ const startTime = performance.now();
+ for (let i = 0; i < iterations; i++) {
+ api.tokenizer(testText + i);
+ }
+ const endTime = performance.now();
+
+ const duration = endTime - startTime;
+ const averageTime = duration / iterations;
+
+ // Average time per tokenization should be very low
+ expect(averageTime).toBeLessThan(1); // Less than 1ms per call
+ expect(duration).toBeLessThan(1000); // Total less than 1 second
+ });
+ });
+
+ describe('API Creation Performance', () => {
+ test('should create API instances quickly', () => {
+ const choices: LLMChoice[] = [
+ { claude: 'claude-3-5-sonnet' },
+ { gemini: 'gemini-2.5-pro' },
+ { chatgpt: 'gpt-3.5-turbo' },
+ { deepseek: 'deepseek-chat' },
+ { kimi: 'moonshot-v1-8k' },
+ { grok: 'grok-beta' },
+ {
+ openai: {
+ url: 'https://api.openai.com/v1',
+ apiKey: 'test-key',
+ model: 'gpt-3.5-turbo'
+ }
+ }
+ ];
+
+ const iterations = 100;
+
+ const startTime = performance.now();
+ for (let i = 0; i < iterations; i++) {
+ const choice = choices[i % choices.length];
+ models(choice);
+ }
+ const endTime = performance.now();
+
+ const duration = endTime - startTime;
+ const averageTime = duration / iterations;
+
+ // API creation should be fast
+ expect(averageTime).toBeLessThan(10); // Less than 10ms per instance
+ expect(duration).toBeLessThan(1000); // Total less than 1 second
+ });
+ });
+
+ describe('Memory Usage', () => {
+ test('should not leak memory with repeated API creation', () => {
+ const initialMemory = process.memoryUsage();
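+ // heapUsed is a coarse signal and GC timing varies, so treat this as an
+ // upper-bound smoke test rather than a precise leak detector.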
+
+ // Create many API instances
+ for (let i = 0; i < 1000; i++) {
+ const choice: LLMChoice = { claude: 'claude-3-5-sonnet' };
+ const api = models(choice);
+ // Use the API briefly
+ api.tokenizer('test');
+ }
+
+ // Force a collection pass where the runtime exposes one; under Bun this
+ // is Bun.gc(), while global.gc needs node --expose-gc and is normally absent here.
+ if (typeof Bun !== 'undefined' && typeof Bun.gc === 'function') {
+ Bun.gc(true);
+ } else if (global.gc) {
+ global.gc();
+ }
+
+ const finalMemory = process.memoryUsage();
+ const memoryIncrease = finalMemory.heapUsed - initialMemory.heapUsed;
+
+ // Memory increase should be reasonable (less than 50MB)
+ expect(memoryIncrease).toBeLessThan(50 * 1024 * 1024);
+ });
+
+ test('should handle large token arrays efficiently', () => {
+ const choice: LLMChoice = { claude: 'claude-3-5-sonnet' };
+ const api = models(choice);
+
+ // Create a large input array
+ const largeInput: InputToken[] = [];
+ for (let i = 0; i < 1000; i++) {
+ largeInput.push({ text: `Token number ${i}. ` });
+ }
+
+ const startTime = performance.now();
+ // Just test that it doesn't crash or take too long
+ expect(() => {
+ const promise = api.send(largeInput);
+ promise.catch(() => {}); // defuse the eventual rejection so it is not reported as unhandled
+ expect(promise).toBeDefined();
+ }).not.toThrow();
+ const endTime = performance.now();
+
+ // Should handle large inputs quickly
+ expect(endTime - startTime).toBeLessThan(100);
+ });
+ });
+});
+
+describe('Edge Cases', () => {
+ describe('Input Validation', () => {
+ test('should handle empty string inputs', async () => {
+ const choice: LLMChoice = { claude: 'claude-3-5-sonnet' };
+ const api = models(choice);
+
+ const promise = api.send('');
+ promise.catch(() => {}); // no credentials are configured, so the call is expected to reject
+ expect(promise).toBeDefined();
+ expect(typeof promise.then).toBe('function');
+ });
+
+ test('should handle whitespace-only inputs', async () => {
+ const choice: LLMChoice = { claude: 'claude-3-5-sonnet' };
+ const api = models(choice);
+
+ const whitespaceInputs = [' ', '\n\t', ' \n \t ', '\r\n'];
+
+ for (const input of whitespaceInputs) {
+ const promise = api.send(input);
+ promise.catch(() => {});
+ expect(promise).toBeDefined();
+ expect(typeof promise.then).toBe('function');
+ }
+ });
+
+ test('should handle very long single words', async () => {
+ const choice: LLMChoice = { claude: 'claude-3-5-sonnet' };
+ const api = models(choice);
+
+ const longWord = 'a'.repeat(10000);
+ const promise = api.send(longWord);
+ promise.catch(() => {});
+ expect(promise).toBeDefined();
+ expect(typeof promise.then).toBe('function');
+ });
+
+ test('should handle special characters and Unicode', async () => {
+ const choice: LLMChoice = { claude: 'claude-3-5-sonnet' };
+ const api = models(choice);
+
+ const specialInputs = [
+ 'Hello ๐ŸŒ! ๐Ÿš€ ๐ŸŽ‰',
+ 'ๆต‹่ฏ•ไธญๆ–‡่พ“ๅ…ฅ',
+ 'ะขะตัั‚ ะฝะฐ ั€ัƒััะบะพะผ',
+ 'ุงู„ุนุฑุจูŠุฉ ุงุฎุชุจุงุฑ',
+ '๐Ÿ”ฅ๐Ÿ’ฏ๐Ÿš€๐Ÿ’ช๐ŸŽฏ',
+ 'Special chars: !@#$%^&*()_+-=[]{}|;:,.<>?',
+ 'Emoji string: ๐Ÿ˜Š๐Ÿ˜‚โค๏ธ๐ŸŽ‰๐Ÿค”๐Ÿ˜ด๐Ÿ™„'
+ ];
+
+ for (const input of specialInputs) {
+ const promise = api.send(input);
+ promise.catch(() => {});
+ expect(promise).toBeDefined();
+ expect(typeof promise.then).toBe('function');
+ }
+ });
+
+ test('should handle malformed image data', async () => {
+ const choice: LLMChoice = { claude: 'claude-3-5-sonnet' };
+ const api = models(choice);
+
+ const malformedImages = [
+ { img: 'not-a-valid-image' },
+ { img: 'data:image/png;base64,' }, // Empty base64
+ { img: 'data:invalid-format;base64,abc123' },
+ { img: '' } // Empty image
+ ];
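+ // Payload validation is assumed to happen server-side: a malformed image
+ // should reject the returned promise rather than throw inside send().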
+
+ for (const imgToken of malformedImages) {
+ const input: InputToken[] = [
+ { text: 'Describe this:' },
+ imgToken
+ ];
+ const promise = api.send(input);
+ promise.catch(() => {});
+ expect(promise).toBeDefined();
+ expect(typeof promise.then).toBe('function');
+ }
+ });
+
+ test('should handle empty input arrays', async () => {
+ const choice: LLMChoice = { claude: 'claude-3-5-sonnet' };
+ const api = models(choice);
+
+ const emptyInput: InputToken[] = [];
+ const promise = api.send(emptyInput);
+ promise.catch(() => {});
+ expect(promise).toBeDefined();
+ expect(typeof promise.then).toBe('function');
+ });
+
+ test('should handle arrays with empty tokens', async () => {
+ const choice: LLMChoice = { claude: 'claude-3-5-sonnet' };
+ const api = models(choice);
+
+ const inputsWithEmptyTokens: InputToken[][] = [
+ [{ text: '' }],
+ [{ text: 'Hello' }, { text: '' }],
+ [{ text: '' }, { text: 'World' }],
+ [{ text: 'Hello' }, { text: '' }, { text: 'World' }]
+ ];
+
+ for (const input of inputsWithEmptyTokens) {
+ const promise = api.send(input);
+ promise.catch(() => {});
+ expect(promise).toBeDefined();
+ expect(typeof promise.then).toBe('function');
+ }
+ });
+ });
+
+ describe('Configuration Edge Cases', () => {
+ test('should handle minimal configuration', () => {
+ const choices: LLMChoice[] = [
+ { claude: '' },
+ { gemini: '' },
+ { chatgpt: '' },
+ { deepseek: '' },
+ { kimi: '' },
+ { grok: '' }
+ ];
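+ // An empty model name exercises whatever default (if any) each provider
+ // wrapper applies; the only contract checked here is that construction
+ // does not throw.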
+
+ choices.forEach(choice => {
+ expect(() => {
+ const api = models(choice);
+ expect(api).toBeDefined();
+ }).not.toThrow();
+ });
+ });
+
+ test('should handle very long model names', () => {
+ const longModelName = 'model-name-that-is-extremely-long-and-unrealistic-but-should-still-work-without-crashing'.repeat(10);
+ const choice: LLMChoice = { claude: longModelName };
+
+ expect(() => {
+ const api = models(choice);
+ expect(api).toBeDefined();
+ }).not.toThrow();
+ });
+
+ test('should handle OpenAI configuration with minimal required fields', () => {
+ const minimalConfigs = [
+ {
+ openai: {
+ url: 'https://api.openai.com/v1',
+ apiKey: 'test-key',
+ model: 'gpt-3.5-turbo'
+ }
+ },
+ {
+ openai: {
+ url: 'https://api.openai.com/v1',
+ apiKey: 'test-key',
+ model: 'gpt-3.5-turbo',
+ allowBrowser: false
+ }
+ },
+ {
+ openai: {
+ url: 'https://api.openai.com/v1',
+ apiKey: 'test-key',
+ model: 'gpt-3.5-turbo',
+ allowBrowser: true
+ }
+ }
+ ];
+
+ minimalConfigs.forEach(config => {
+ expect(() => {
+ const api = models(config);
+ expect(api).toBeDefined();
+ }).not.toThrow();
+ });
+ });
+
+ test('should handle malformed URLs gracefully', () => {
+ const malformedUrls = [
+ 'not-a-url',
+ 'http://',
+ 'https://',
+ 'ftp://example.com',
+ 'javascript:alert(1)',
+ 'file:///etc/passwd'
+ ];
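+ // URL validation is assumed to be lazy: a bad base URL should surface on
+ // the first request, not in the constructor.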
+
+ malformedUrls.forEach(url => {
+ const config: LLMChoice = {
+ openai: {
+ url: url,
+ apiKey: 'test-key',
+ model: 'gpt-3.5-turbo'
+ }
+ };
+
+ expect(() => {
+ const api = models(config);
+ expect(api).toBeDefined();
+ }).not.toThrow();
+ });
+ });
+ });
+
+ describe('Concurrent Operations', () => {
+ test('should handle multiple simultaneous API calls', async () => {
+ const choice: LLMChoice = { claude: 'claude-3-5-sonnet' };
+ const api = models(choice);
+
+ const promises = [];
+ for (let i = 0; i < 50; i++) {
+ promises.push(api.send(`Test message ${i}`).catch(() => `Error ${i}`));
+ }
+
+ const results = await Promise.allSettled(promises);
+ expect(results.length).toBe(50);
+
+ // All promises should be settled (either fulfilled or rejected)
+ results.forEach(result => {
+ expect(result.status).toBeOneOf(['fulfilled', 'rejected']);
+ });
+ });
+
+ test('should handle multiple simultaneous streams', () => {
+ const choice: LLMChoice = { claude: 'claude-3-5-sonnet' };
+ const api = models(choice);
+
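+ // stream() is fire-and-forget here: without valid credentials the handlers
+ // may never run, so the test only asserts that registering ten concurrent
+ // streams does not throw.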
+ expect(() => {
+ for (let i = 0; i < 10; i++) {
+ const handler = (data: string) => console.log(`Stream ${i}:`, data);
+ api.stream(`Message ${i}`, handler);
+ }
+ }).not.toThrow();
+ });
+
+ test('should handle mixing streaming and non-streaming calls', async () => {
+ const choice: LLMChoice = { claude: 'claude-3-5-sonnet' };
+ const api = models(choice);
+
+ // Start some streams
+ for (let i = 0; i < 5; i++) {
+ const handler = (data: string) => console.log(`Stream ${i}:`, data);
+ api.stream(`Stream message ${i}`, handler);
+ }
+
+ // Also make some regular calls
+ const promises = [];
+ for (let i = 0; i < 5; i++) {
+ promises.push(api.send(`Regular message ${i}`).catch(() => null));
+ }
+
+ const results = await Promise.allSettled(promises);
+ expect(results.length).toBe(5);
+ });
+ });
+
+ describe('Resource Management', () => {
+ test('should handle rapid model switching', () => {
+ const choice: LLMChoice = { claude: 'claude-3-5-sonnet' };
+ const api = models(choice);
+
+ // Named modelNames rather than `models` to avoid shadowing the imported
+ // factory, which would make the models(choice) call above throw in the
+ // temporal dead zone.
+ const modelNames = ['claude-3-5-sonnet', 'claude-3-haiku', 'claude-3-opus'];
+ const iterations = 100;
+
+ expect(() => {
+ for (let i = 0; i < iterations; i++) {
+ const model = modelNames[i % modelNames.length];
+ api.setModel(model);
+ }
+ }).not.toThrow();
+
+ // Should still be functional
+ expect(() => api.tokenizer('test')).not.toThrow();
+ });
+
+ test('should handle system prompt changes efficiently', async () => {
+ const choice: LLMChoice = { claude: 'claude-3-5-sonnet' };
+ const api = models(choice);
+
+ const systemPrompts = [
+ 'You are a helpful assistant.',
+ 'You are a creative writer.',
+ 'You are a technical expert.',
+ 'You are a friendly chatbot.'
+ ];
+
+ const promises = systemPrompts.map(prompt =>
+ api.send('Hello', prompt).catch(() => null)
+ );
+
+ const results = await Promise.allSettled(promises);
+ expect(results.length).toBe(systemPrompts.length);
+ });
+ });
+
+ describe('Extreme Cases', () => {
+ test('should handle extremely large system prompts', async () => {
+ const choice: LLMChoice = { claude: 'claude-3-5-sonnet' };
+ const api = models(choice);
+
+ const largeSystemPrompt = 'You are a helpful assistant. '.repeat(10000);
+ expect(largeSystemPrompt.length).toBeGreaterThan(200000);
+
+ const promise = api.send('Hello', largeSystemPrompt);
+ promise.catch(() => {});
+ expect(promise).toBeDefined();
+ expect(typeof promise.then).toBe('function');
+ });
+
+ test('should handle very large input token arrays', async () => {
+ const choice: LLMChoice = { claude: 'claude-3-5-sonnet' };
+ const api = models(choice);
+
+ // Build a long, flat array of 10,000 tokens (nothing is actually nested)
+ const largeInput: InputToken[] = [];
+ for (let i = 0; i < 10000; i++) {
+ largeInput.push({ text: `Item ${i}: ` });
+ }
+
+ const startTime = performance.now();
+ const promise = api.send(largeInput);
+ promise.catch(() => {});
+ const endTime = performance.now();
+
+ expect(promise).toBeDefined();
+ expect(endTime - startTime).toBeLessThan(1000); // Should be fast
+ });
+
+ test('should handle rapid creation and destruction', () => {
+ const startTime = performance.now();
+
+ for (let i = 0; i < 1000; i++) {
+ const choice: LLMChoice = { claude: 'claude-3-5-sonnet' };
+ const api = models(choice);
+ // Use it briefly
+ api.tokenizer(`Test ${i}`);
+ // Let it go out of scope
+ }
+
+ const endTime = performance.now();
+ expect(endTime - startTime).toBeLessThan(5000); // Should complete in 5 seconds
+ });
+ });
+});
\ No newline at end of file