1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
|
/**
* Test configuration and setup utilities
* This file provides configuration for running tests with different LLM providers
*/
// Environment variable configuration
/**
 * Environment-driven configuration consumed by the test suite.
 * The first three fields always resolve (getTestConfig supplies defaults);
 * the per-provider keys are optional and gate integration tests.
 */
export interface TestConfig {
// OpenAI-compatible endpoint base URL (from ZAI_BASE_URL).
baseUrl: string;
// API key sent to that endpoint (from ZAI_API_KEY).
apiKey: string;
// Model identifier used by tests (from TEST_MODEL).
model: string;
// Optional real credentials; undefined when the env var is unset.
claudeApiKey?: string;
openaiApiKey?: string;
geminiApiKey?: string;
}
// Get test configuration from environment variables
/**
 * Build a TestConfig from the current environment, falling back to safe
 * placeholder defaults for the required fields. Provider-specific keys are
 * passed through as-is and stay undefined when unset.
 */
export function getTestConfig(): TestConfig {
  const env = Bun.env;
  const config: TestConfig = {
    baseUrl: env.ZAI_BASE_URL || 'https://api.openai.com/v1',
    apiKey: env.ZAI_API_KEY || 'test-api-key',
    model: env.TEST_MODEL || 'glm-4.6',
    claudeApiKey: env.CLAUDE_API_KEY,
    openaiApiKey: env.OPENAI_API_KEY,
    geminiApiKey: env.GEMINI_API_KEY
  };
  return config;
}
// Check if we have real API credentials for integration testing
/**
 * Report which providers have real credentials available in the current
 * environment. `any` is true when at least one provider key is set.
 */
export function hasIntegrationCredentials(): {
  claude: boolean;
  openai: boolean;
  gemini: boolean;
  any: boolean;
} {
  const { claudeApiKey, openaiApiKey, geminiApiKey } = getTestConfig();
  const claude = Boolean(claudeApiKey);
  const openai = Boolean(openaiApiKey);
  const gemini = Boolean(geminiApiKey);
  return { claude, openai, gemini, any: claude || openai || gemini };
}
// Mock API responses for testing without real credentials
/**
 * Canned per-provider responses for tests that run without real API
 * credentials (see hasIntegrationCredentials).
 */
export const mockResponses = {
claude: {
success: 'SUCCESS',
// Multi-line fixture: the digits 1-5, newline-separated.
counting: '1\n2\n3\n4\n5',
error: 'Mock Claude error'
},
openai: {
success: 'SUCCESS',
error: 'Mock OpenAI error'
},
gemini: {
success: 'SUCCESS',
error: 'Mock Gemini error'
}
};
// Test data generators
// 1x1 transparent PNG, small enough to embed in image-token fixtures.
const TINY_PNG =
  'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mNkYPhfDwAChwGA60e6kgAAAABJRU5ErkJggg==';

/**
 * Generators for test inputs: plain strings covering common and edge-case
 * content, plus token-list builders for text, image, and mixed prompts.
 */
export const testData = {
  simpleText: () => 'Hello, this is a simple test message.',
  longText: () => 'This is a longer test message. '.repeat(100),
  unicodeText: () => 'Hello 🌍! 测试中文! Тест на русском! العربية!',
  specialChars: () => '!@#$%^&*()_+-=[]{}|;:,.<>?/~`',
  emptyString: () => '',
  whitespaceOnly: () => ' \n\t ',
  // Build `count` numbered text tokens: "Token 1: ", "Token 2: ", ...
  textTokens: (count: number = 3) => {
    const tokens: { text: string }[] = [];
    for (let i = 1; i <= count; i++) {
      tokens.push({ text: `Token ${i}: ` });
    }
    return tokens;
  },
  // Build `count` identical image tokens carrying the tiny PNG data URI.
  imageTokens: (count: number = 1) => {
    const tokens: { img: string }[] = [];
    for (let i = 0; i < count; i++) {
      tokens.push({ img: TINY_PNG });
    }
    return tokens;
  },
  // One text token followed by one image token.
  mixedTokens: () => [
    { text: 'Describe this image: ' },
    { img: TINY_PNG }
  ]
};
// Performance test utilities
/**
 * Micro-benchmark helpers for the API surface.
 *
 * NOTE: this exported object shadows the global `performance` inside this
 * module, so the timers below must go through `globalThis.performance`.
 * (The previous code called a bare `performance.now()`, which resolved to
 * this object itself — it has no `now` method — and threw a TypeError.)
 */
export const performance = {
  /**
   * Time `iterations` calls to `api.tokenizer`.
   * @returns total and per-call wall time in milliseconds.
   */
  measureTokenization: (api: any, text: string, iterations: number = 1000) => {
    const start = globalThis.performance.now();
    for (let i = 0; i < iterations; i++) {
      // Vary the input each round so memoized tokenizers can't skew timing.
      api.tokenizer(text + i);
    }
    const end = globalThis.performance.now();
    return {
      totalTime: end - start,
      averageTime: (end - start) / iterations,
      iterations
    };
  },
  /**
   * Time `iterations` constructions of the model API for `choice`.
   * The module is required once up front — `require` is cached after the
   * first call anyway, so keeping it inside the loop only polluted the
   * first iteration's timing with module-load cost.
   */
  measureApiCreation: (choice: any, iterations: number = 100) => {
    const models = require('../index').default;
    const start = globalThis.performance.now();
    for (let i = 0; i < iterations; i++) {
      models(choice);
    }
    const end = globalThis.performance.now();
    return {
      totalTime: end - start,
      averageTime: (end - start) / iterations,
      iterations
    };
  }
};
// Error simulation utilities
// Returns a factory that builds a fresh Error with the given message,
// so each simulated failure carries a stack trace from the call site.
const makeError = (message: string) => () => new Error(message);

/**
 * Factories for the failure modes the test suite simulates:
 * network, auth, timeout, rate-limit, and malformed-response errors.
 */
export const errorSimulator = {
  networkError: makeError('Network connection failed'),
  apiKeyError: makeError('Invalid API key'),
  timeoutError: makeError('Request timeout'),
  rateLimitError: makeError('Rate limit exceeded'),
  invalidResponseError: makeError('Invalid API response format')
};
// Logging utilities for tests
/**
 * Prefixed console logging for test output. `info`, `warn`, and `error`
 * always print; `debug` only prints when DEBUG_TESTS=true is set.
 */
export const testLogger = {
  info(message: string, ...rest: any[]) {
    console.log(`[TEST-INFO] ${message}`, ...rest);
  },
  warn(message: string, ...rest: any[]) {
    console.warn(`[TEST-WARN] ${message}`, ...rest);
  },
  error(message: string, ...rest: any[]) {
    console.error(`[TEST-ERROR] ${message}`, ...rest);
  },
  debug(message: string, ...rest: any[]) {
    if (Bun.env.DEBUG_TESTS === 'true') {
      console.debug(`[TEST-DEBUG] ${message}`, ...rest);
    }
  }
};
// Cleanup utilities
/**
 * Test teardown helpers.
 */
export const cleanup = {
  // Remove the named variables from the environment after a test run.
  clearEnv(...names: string[]) {
    for (const name of names) {
      delete Bun.env[name];
    }
  },
  // Placeholder for resetting shared module state between tests.
  resetGlobal() {
    // No global state to reset at the moment.
  }
};
// Export a default test setup object
// Aggregate of every helper in this module, for consumers that prefer a
// single default import over individual named imports.
export default {
getTestConfig,
hasIntegrationCredentials,
mockResponses,
testData,
performance,
errorSimulator,
testLogger,
cleanup
};
|