Diffstat (limited to 'src/openai.ts')
-rw-r--r--  src/openai.ts  289
1 file changed, 289 insertions, 0 deletions
diff --git a/src/openai.ts b/src/openai.ts
new file mode 100644
index 0000000..2e15dcf
--- /dev/null
+++ b/src/openai.ts
@@ -0,0 +1,289 @@
+import OpenAI from "openai";
+import { RESPONSE_LENGTH } from "./logic/constants";
+import type { AResult, ChatMessage, OChoice } from "./types";
+import OpenAIToolUse from "./openai_tools";
+import type { FileObject } from "openai/resources/files";
+
+type Message = OpenAI.Chat.Completions.ChatCompletionMessageParam;
+
+type Props = {
+  maxTokens?: number;
+  baseURL?: string;
+  apiKey?: string;
+  tokenizer?: (text: string) => number;
+};
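+
+// Usage sketch (model name, budget, and `history` are illustrative):
+//   const convo = new Conversation({ maxTokens: 64_000 });
+//   convo.setModel("gpt-4o-mini");
+//   const result = await convo.send("You are terse.", history);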
+export default class Conversation {
+  private maxTokens: number = 128_000;
+  private apiKey: string = Bun.env["OPENAI_API_KEY"] || "";
+  private baseURL: string = "https://api.openai.com/v1";
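+  // Rough fallback: ~3 characters per token. Pass a real tokenizer via
+  // props (e.g. tiktoken's encode length) when accurate budgeting matters.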
+  private tokenizer: (text: string) => number = (text) => text.length / 3;
+  openai: OpenAI;
+  private model: string = "chatgpt-4o-latest";
+
+  constructor(props: Props) {
+    if (props.apiKey) this.apiKey = props.apiKey;
+    if (props.baseURL) this.baseURL = props.baseURL;
+    this.openai = new OpenAI({ baseURL: this.baseURL, apiKey: this.apiKey });
+    if (props.maxTokens) this.maxTokens = props.maxTokens;
+    if (props.tokenizer) this.tokenizer = props.tokenizer;
+  }
+
+  public setModel(model: string) {
+    this.model = model;
+  }
+
+  private mapMessages(input: ChatMessage[]): Message[] {
+    return input.map((m) => {
+      const role = m.author === "openai" ? "assistant" : "user";
+      return { role, content: m.text, name: m.author };
+    });
+  }
+
+  // R1-style endpoints reject consecutive messages with the same role, so
+  // merge same-role neighbors into one message instead of dropping any text.
+  private mapMessagesR1(input: ChatMessage[]): Message[] {
+    return input.reduce((acc: Message[], m) => {
+      const role = m.author === "openai" ? "assistant" : "user";
+      const prev = acc[acc.length - 1];
+      if (prev && prev.role === role) {
+        prev.content = `${prev.content}\n${m.text}`;
+      } else {
+        acc.push({ role, content: m.text, name: m.author });
+      }
+      return acc;
+    }, []);
+  }
+
+  public async send(sys: string, input: ChatMessage[]): AResult<OChoice[]> {
+    const messages = this.mapMessages(input);
+    const sysMsg: Message = { role: "system", content: sys };
+    const allMessages = [sysMsg, ...messages];
+    const truncated = this.truncateHistory(allMessages);
+    return this.apiCall(truncated);
+  }
+
+  public async sendR1(input: ChatMessage[]): AResult<OChoice[]> {
+    const messages = this.mapMessagesR1(input);
+    const truncated = this.truncateHistory(messages);
+    return this.apiCall(truncated);
+  }
+
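+  // Usage sketch: pipe streamed tokens to stdout (`history` is illustrative):
+  //   await convo.stream("be brief", history, (tok) => process.stdout.write(tok));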
+  public async stream(
+    sys: string,
+    input: ChatMessage[],
+    handle: (c: string) => void,
+  ) {
+    const messages = this.mapMessages(input);
+    const sysMsg: Message = { role: "system", content: sys };
+    const allMessages = [sysMsg, ...messages];
+    const truncated = this.truncateHistory(allMessages);
+    await this.apiCallStream(truncated, handle);
+  }
+
+  public async streamR1(input: ChatMessage[], handle: (c: string) => void) {
+    const messages = this.mapMessagesR1(input);
+    const truncated = this.truncateHistory(messages);
+    await this.apiCallStream(truncated, handle);
+  }
+
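+  // Drops oldest turns first. With the default ~3-chars-per-token estimate,
+  // a 600k-char history (~200k tokens) is trimmed until it fits the budget.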
+  private truncateHistory(messages: Message[]): Message[] {
+    let totalTokens = messages.reduce(
+      (total, message) => total + this.tokenizer(message.content as string),
+      0,
+    );
+    while (totalTokens > this.maxTokens && messages.length > 1) {
+      // Always keep the system message if it exists; drop the oldest other
+      // message and subtract its tokens so the loop can actually terminate.
+      const startIndex = messages[0].role === "system" ? 1 : 0;
+      const [removed] = messages.splice(startIndex, 1);
+      totalTokens -= this.tokenizer(removed.content as string);
+    }
+    return messages;
+  }
+
+  private async apiCall(messages: Message[]): AResult<OChoice[]> {
+    try {
+      const completion = await this.openai.chat.completions.create({
+        temperature: 1.3,
+        model: this.model,
+        messages,
+        max_tokens: RESPONSE_LENGTH,
+      });
+      if (!completion) return { error: "null response from openai" };
+      return { ok: completion.choices };
+    } catch (e) {
+      console.error(e, "error in openai api");
+      return { error: `${e}` };
+    }
+  }
+
+  private async apiCallStream(
+    messages: Message[],
+    handle: (c: string) => void,
+  ): Promise<void> {
+    try {
+      const stream = await this.openai.chat.completions.create({
+        temperature: 1.3,
+        model: this.model,
+        messages,
+        max_tokens: RESPONSE_LENGTH,
+        stream: true,
+      });
+
+      for await (const chunk of stream) {
+        for (const choice of chunk.choices) {
+          if (!choice.delta) continue;
+          const cont = choice.delta.content;
+          if (!cont) continue;
+          handle(cont);
+        }
+      }
+    } catch (e) {
+      console.error(e, "error in openai api");
+      handle(`Error streaming OpenAI, ${e}`);
+    }
+  }
+
+  // assistant
+  async assistant() {
+    const assistant = await this.openai.beta.assistants.create({
+      name: "Literature professor",
+      instructions:
+        "You are a professor of literature. Use your knowledge to analyze large pieces of text and answer questions from your users.",
+      model: this.model,
+      tools: [{ type: "file_search" }],
+      temperature: 0.7,
+      response_format: { type: "text" },
+    });
+    const vector_store = await this.openai.beta.vectorStores.create({
+      name: "docs",
+    });
+    const tool_resources = {
+      file_search: { vector_store_ids: [vector_store.id] },
+    };
+    const tant = await this.openai.beta.assistants.update(assistant.id, {
+      tool_resources,
+    });
+    const thread = await this.openai.beta.threads.create();
+    await this.openai.beta.threads.messages.create(thread.id, {
+      role: "user",
+      content:
+        "Greetings, pleasure to meet. Let's get started if you don't mind",
+    });
+    // createAndPoll blocks until the run completes; the old bare
+    // `while (run.status === "in_progress")` loop spun forever because
+    // the status was never re-fetched.
+    const run = await this.openai.beta.threads.runs.createAndPoll(thread.id, {
+      assistant_id: tant.id,
+      instructions: "be nice",
+    });
+    console.log({ run });
+  }
+
+  async lookatFile(fo: FileObject) {
+    const tant = await this.openai.beta.assistants.create({
+      name: "Literature professor",
+      instructions:
+        "You are a professor of literature. Use your knowledge to analyze large pieces of text and answer questions from your users.",
+      model: this.model,
+      tools: [{ type: "file_search" }],
+      temperature: 0.7,
+      response_format: { type: "text" },
+    });
+    const thread = await this.openai.beta.threads.create();
+    await this.openai.beta.threads.messages.create(thread.id, {
+      role: "user",
+      content:
+        "Greetings, pleasure to meet. Let's get started if you don't mind. Look at this file and summarize its contents",
+      attachments: [{ file_id: fo.id, tools: [{ type: "file_search" }] }],
+    });
+    const run = await this.openai.beta.threads.runs.createAndPoll(thread.id, {
+      assistant_id: tant.id,
+    });
+    console.log({ run });
+    const msgs = await this.openai.beta.threads.messages.list(run.thread_id);
+    console.log({ msgs });
+    for (const m of msgs.data) {
+      console.log(m, "message on thread");
+    }
+  }
+
+  async uploadFile(res: Response) {
+    // The SDK accepts a fetch Response directly as an uploadable file.
+    const file = await this.openai.files.create({
+      file: res,
+      purpose: "assistants",
+    });
+    console.log({ file }, "uploaded");
+    return file;
+
+    // Example response shape:
+    // {
+    //   "id": "file-abc123",
+    //   "object": "file",
+    //   "bytes": 120000,
+    //   "created_at": 1677610602,
+    //   "filename": "mydata.jsonl",
+    //   "purpose": "fine-tune",
+    // }
+  }
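+
+  // Typical flow (url is a placeholder): upload a fetched document, then
+  // ask the assistant to summarize it:
+  //   const file = await conv.uploadFile(await fetch(url));
+  //   await conv.lookatFile(file);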
+
+  // async analyzeFile(){
+  //   const huh = await this.openai.beta.vectorStores.files.uploadAndPoll()
+  // }
+
+  // mcp
+
+  async mcp() {
+    // Assumes a local bridge on this port that serves tool definitions
+    // already shaped as OpenAI chat-completion tools.
+    const res = await fetch("http://localhost:8900/list");
+    const list =
+      (await res.json()) as OpenAI.Chat.Completions.ChatCompletionTool[];
+    await this.tryTools(list);
+  }
+
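+  // `tools` uses OpenAI's function-tool shape, e.g.
+  //   [{ type: "function", function: { name: "get_timeline", parameters: {...} } }]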
+  async tryTools(tools: OpenAI.Chat.Completions.ChatCompletionTool[]) {
+    const messages: Message[] = [
+      { role: "user", content: "What's on my twitter timeline right now?" },
+    ];
+    const completion = await this.openai.chat.completions.create({
+      model: "gpt-4o-2024-11-20",
+      messages,
+      tools,
+    });
+    if (!completion) return { error: "null response from openai" };
+
+    for (const choice of completion.choices) {
+      console.log({ choice });
+      if (choice.message.tool_calls) {
+        // OpenAIToolUse takes over the tool-call round trip from here.
+        new OpenAIToolUse(
+          this.openai,
+          "gpt-4o-2024-11-20",
+          tools,
+          choice.message,
+          choice.message.tool_calls,
+        );
+      }
+    }
+  }
+}