import {
Chat,
GoogleGenAI,
type Content,
type GeneratedImage,
type GeneratedVideo,
} from "@google/genai";
import type { AsyncRes } from "sortug";
export default class GeminiAPI {
tokenizer: (text: string) => number;
maxTokens: number;
private model: string;
api: GoogleGenAI;
chats: Map<string, Chat> = new Map<string, Chat>();
constructor(
maxTokens = 200_000,
tokenizer: (text: string) => number = (text) => text.length / 3,
model?: string,
) {
this.maxTokens = maxTokens;
this.tokenizer = tokenizer;
const gem = new GoogleGenAI({ apiKey: Bun.env["GEMINI_API_KEY"]! });
this.api = gem;
    this.model = model || "gemini-2.5-pro-preview-05-06";
}
  // Create a chat session and return the key it is stored under, so callers
  // can retrieve it later (a timestamp is used when no name is given).
  createChat({ name, history }: { name?: string; history?: Content[] }): string {
    const chat = this.api.chats.create({ model: this.model, history });
    const key = name || Date.now().toString();
    this.chats.set(key, chat);
    return key;
  }
  async followChat(name: string, message: string): AsyncRes<string> {
    const chat = this.chats.get(name);
    if (!chat) return { error: "no chat with that name" };
    try {
      const response = await chat.sendMessage({ message });
      return { ok: response.text || "" };
    } catch (e) {
      return { error: `${e}` };
    }
  }
async followChatStream(
name: string,
message: string,
handler: (data: string) => void,
) {
const chat = this.chats.get(name);
if (!chat) throw new Error("no chat!");
else {
const response = await chat.sendMessageStream({ message });
for await (const chunk of response) {
const text = chunk.text;
handler(text || "");
}
}
}
async send(message: string, systemPrompt?: string): AsyncRes<string> {
try {
const opts = {
model: this.model,
contents: message,
};
const fopts = systemPrompt
? { ...opts, config: { systemInstruction: systemPrompt } }
: opts;
const response = await this.api.models.generateContent(fopts);
return { ok: response.text || "" };
} catch (e) {
return { error: `${e}` };
}
}
async sendStream(
handler: (s: string) => void,
message: string,
systemPrompt?: string,
) {
const opts = {
model: this.model,
contents: message,
};
const fopts = systemPrompt
? { ...opts, config: { systemInstruction: systemPrompt } }
: opts;
const response = await this.api.models.generateContentStream(fopts);
for await (const chunk of response) {
handler(chunk.text || "");
}
}
  async makeImage(prompt: string): AsyncRes<GeneratedImage[]> {
    try {
      // Note: generateImages needs an image-capable model (e.g. an Imagen
      // model); the default text model set in the constructor will not work.
      const response = await this.api.models.generateImages({
        model: this.model,
        prompt,
      });
      const images = response.generatedImages;
      if (!images || images.length === 0)
        return { error: "no images generated" };
      return { ok: images };
    } catch (e) {
      return { error: `${e}` };
    }
  }
  async makeVideo({
    prompt,
    image,
  }: {
    prompt?: string;
    image?: string;
  }): AsyncRes<GeneratedVideo[]> {
    try {
      // Note: generateVideos needs a video-capable model (e.g. a Veo model)
      // and returns a long-running operation that has to be polled.
      // `image`, when given, is assumed to be base64-encoded image bytes.
      let operation = await this.api.models.generateVideos({
        model: this.model,
        prompt,
        image: image ? { imageBytes: image } : undefined,
      });
      while (!operation.done) {
        await new Promise((resolve) => setTimeout(resolve, 10_000));
        operation = await this.api.operations.getVideosOperation({ operation });
      }
      const videos = operation.response?.generatedVideos;
      if (!videos || videos.length === 0)
        return { error: "no videos generated" };
      return { ok: videos };
    } catch (e) {
      return { error: `${e}` };
    }
  }
}
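
// Example usage (illustrative sketch; assumes GEMINI_API_KEY is set and the
// default model above is available; adjust names to your setup):
//
//   const ai = new GeminiAPI();
//   const res = await ai.send("Summarize this repository in one sentence.");
//   if ("ok" in res) console.log(res.ok);
//
//   const key = ai.createChat({ name: "demo" });
//   const reply = await ai.followChat(key, "Hello!");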
// TODO how to use caches
// https://ai.google.dev/api/caching
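//
// A minimal sketch of explicit caching with @google/genai (field names follow
// the docs linked above; the model string, TTL, and the `longContext` and
// `question` values are illustrative assumptions):
//
//   const gem = new GoogleGenAI({ apiKey: Bun.env["GEMINI_API_KEY"]! });
//   const cache = await gem.caches.create({
//     model: "gemini-2.5-pro-preview-05-06",
//     config: { contents: longContext, systemInstruction: "...", ttl: "300s" },
//   });
//   const response = await gem.models.generateContent({
//     model: "gemini-2.5-pro-preview-05-06",
//     contents: question,
//     config: { cachedContent: cache.name },
//   });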