| author | polwex <polwex@sortug.com> | 2025-07-23 07:42:10 +0700 |
|---|---|---|
| committer | polwex <polwex@sortug.com> | 2025-07-23 07:42:10 +0700 |
| commit | a23f430e2afd7d9ea462f71c2fd1568d8e1dba38 (patch) | |
| tree | 67b4b53c8009e4c342fa36ec35520023bafd6368 /src/genericnew.ts | |
| parent | de917196d3602197a90e9eaa7cf7f8b5d0c7718e (diff) | |
turns out kimi and others don't implement the new openai sdk
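For context, a minimal sketch (not part of this commit) of the two SDK surfaces the message refers to: the older Chat Completions endpoint that OpenAI-compatible providers such as Kimi generally do expose, and the newer Responses endpoint that `src/genericnew.ts` wraps. The base URL, model name, and environment variable below are placeholders.

```ts
import OpenAI from "openai";

async function main() {
  // Placeholder endpoint and credentials for an OpenAI-compatible provider.
  const client = new OpenAI({
    baseURL: "https://api.example.com/v1",
    apiKey: process.env.EXAMPLE_API_KEY,
  });

  // Older Chat Completions API: the surface most compatible providers implement.
  const chat = await client.chat.completions.create({
    model: "example-model",
    messages: [{ role: "user", content: "hello" }],
  });
  console.log(chat.choices[0]?.message?.content);

  // Newer Responses API: what the wrapper added in this commit targets;
  // not universally implemented by third-party providers.
  const resp = await client.responses.create({
    model: "example-model",
    input: "hello",
  });
  console.log(resp.output_text);
}

main();
```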
Diffstat (limited to 'src/genericnew.ts')
| -rw-r--r-- | src/genericnew.ts | 169 |
1 file changed, 169 insertions, 0 deletions
```diff
diff --git a/src/genericnew.ts b/src/genericnew.ts
new file mode 100644
index 0000000..3690dc6
--- /dev/null
+++ b/src/genericnew.ts
@@ -0,0 +1,169 @@
+import OpenAI from "openai";
+import { MAX_TOKENS, RESPONSE_LENGTH } from "./logic/constants";
+import type { AIModelAPI, InputToken } from "./types";
+import type { AsyncRes } from "sortug";
+import type {
+  ResponseCreateParamsBase,
+  ResponseCreateParamsNonStreaming,
+  ResponseCreateParamsStreaming,
+  ResponseInput,
+} from "openai/resources/responses/responses.mjs";
+
+type Props = {
+  baseURL: string;
+  apiKey: string | undefined;
+  model?: string;
+  maxTokens?: number;
+  tokenizer?: (text: string) => number;
+};
+export default class OpenAIAPI implements AIModelAPI {
+  private apiKey;
+  private baseURL;
+  private api;
+  maxTokens: number = MAX_TOKENS;
+  tokenizer: (text: string) => number = (text) => text.length / 3;
+  model;
+
+  constructor(props: Props) {
+    if (!props.apiKey) throw new Error("NO API KEY");
+    console.log({ props });
+    this.apiKey = props.apiKey;
+    this.baseURL = props.baseURL;
+    this.api = new OpenAI({ baseURL: this.baseURL, apiKey: this.apiKey });
+    this.model = props.model || "";
+    if (props.maxTokens) this.maxTokens = props.maxTokens;
+    if (props.tokenizer) this.tokenizer = props.tokenizer;
+  }
+  public setModel(model: string) {
+    this.model = model;
+  }
+
+  public buildInput(tokens: InputToken[]): ResponseInput {
+    return [
+      {
+        role: "user",
+        content: tokens.map((t) =>
+          "text" in t
+            ? { type: "input_text", text: t.text }
+            : "img" in t
+              ? { type: "input_image", image_url: t.img, detail: "auto" }
+              : { type: "input_text", text: "oy vey" },
+        ),
+      },
+    ];
+  }
+
+  // OpenAI SDK has three kinds ReponseInputContent: text image and file
+  // images can be URLs or base64 dataurl thingies
+  //
+  public async send(
+    inpt: string | InputToken[],
+    sys?: string,
+  ): AsyncRes<string> {
+    const input = typeof inpt === "string" ? inpt : this.buildInput(inpt);
+    const params = sys ? { instructions: sys, input } : { input };
+    const res = await this.apiCall(params);
+    if ("error" in res) return res;
+    else {
+      try {
+        return { ok: res.ok.output_text };
+      } catch (e) {
+        return { error: `${e}` };
+      }
+    }
+  }
+
+  public async stream(
+    inpt: string | InputToken[],
+    handle: (c: string) => void,
+    sys?: string,
+  ) {
+    const input = typeof inpt === "string" ? inpt : this.buildInput(inpt);
+    const params = sys ? { instructions: sys, input } : { input };
+    await this.apiCallStream(params, handle);
+  }
+
+  // TODO custom temperature?
+  private async apiCall(
+    params: ResponseCreateParamsNonStreaming,
+  ): AsyncRes<OpenAI.Responses.Response> {
+    try {
+      const res = await this.api.responses.create({
+        ...params,
+        // temperature: 1.3,
+        model: params.model || this.model,
+        input: params.input,
+        max_output_tokens: params.max_output_tokens || RESPONSE_LENGTH,
+        stream: false,
+      });
+      // TODO damn there's a lot of stuff here
+      return { ok: res };
+    } catch (e) {
+      console.log(e, "error in openai api");
+      return { error: `${e}` };
+    }
+  }
+
+  private async apiCallStream(
+    params: ResponseCreateParamsBase,
+    handler: (c: string) => void,
+  ) {
+    // temperature: 1.3,
+    const pms: ResponseCreateParamsStreaming = {
+      ...params,
+      stream: true,
+      model: params.model || this.model,
+      input: params.input,
+      max_output_tokens: params.max_output_tokens || RESPONSE_LENGTH,
+    };
+    try {
+      const stream = await this.api.responses.create(pms);
+      for await (const event of stream) {
+        console.log(event);
+        switch (event.type) {
+          // TODO deal with audio and whatever
+          case "response.output_text.delta":
+            handler(event.delta);
+            break;
+          case "response.completed":
+            break;
+          default:
+            break;
+        }
+        // if (event.type === "response.completed")
+        // wtf how do we use this
+      }
+    } catch (e) {
+      console.log(e, "error in openai api");
+      return { error: `${e}` };
+    }
+  }
+
+  // private async apiCallStream(
+  //   messages: Message[],
+  //   handle: (c: string) => void,
+  // ): Promise<void> {
+  //   try {
+  //     const stream = await this.api.chat.completions.create({
+  //       temperature: 1.3,
+  //       model: this.model,
+  //       messages,
+  //       max_tokens: RESPONSE_LENGTH,
+  //       stream: true,
+  //     });
+
+  //     for await (const chunk of stream) {
+  //       for (const choice of chunk.choices) {
+  //         console.log({ choice });
+  //         if (!choice.delta) continue;
+  //         const cont = choice.delta.content;
+  //         if (!cont) continue;
+  //         handle(cont);
+  //       }
+  //     }
+  //   } catch (e) {
+  //     console.log(e, "error in openai api");
+  //     handle(`Error streaming OpenAI, ${e}`);
+  //   }
+  // }
+}
```
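A possible usage sketch of the class added above, assuming the surrounding project types (`AIModelAPI`, `InputToken`, `AsyncRes`) behave as the file implies; the base URL, model name, environment variable, and image URL are placeholders.

```ts
import OpenAIAPI from "./genericnew";

async function demo() {
  // The constructor throws if apiKey is undefined.
  const api = new OpenAIAPI({
    baseURL: "https://api.example.com/v1",
    apiKey: process.env.EXAMPLE_API_KEY,
    model: "example-model",
  });

  // send() resolves to { ok: string } on success or { error: string } on failure.
  const res = await api.send("Say hello", "You are a terse assistant.");
  if ("ok" in res) console.log(res.ok);
  else console.error(res.error);

  // stream() forwards response.output_text.delta chunks to the handler as they arrive.
  await api.stream(
    [
      { text: "Describe this image" },
      { img: "https://example.com/cat.png" },
    ],
    (chunk) => process.stdout.write(chunk),
  );
}

demo();
```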
