summaryrefslogtreecommitdiff
path: root/app/src/lib/llm-service.ts
diff options
context:
space:
mode:
Diffstat (limited to 'app/src/lib/llm-service.ts')
-rw-r--r--app/src/lib/llm-service.ts205
1 file changed, 205 insertions, 0 deletions
diff --git a/app/src/lib/llm-service.ts b/app/src/lib/llm-service.ts
new file mode 100644
index 0000000..af82fe2
--- /dev/null
+++ b/app/src/lib/llm-service.ts
@@ -0,0 +1,205 @@
+// import OpenAI from "openai";
+import { createUserContent, GoogleGenAI } from "@google/genai";
+
+import type {
+ CategorizationRequest,
+ CategorizationResponse,
+} from "./categorization";
+import { CATEGORIZATION_PROMPT } from "./llm-prompts";
+
/**
 * Per-request tuning knobs for the LLM backend.
 * All fields are optional; unset fields fall back to the service defaults.
 */
export interface LLMRequestOptions {
  // Model identifier (e.g. "gemini-2.5-flash").
  model?: string;
  // Sampling temperature; lower values give more deterministic output.
  temperature?: number;
  // Upper bound on tokens generated in the response.
  maxTokens?: number;
}
+
+export class LLMService {
+ private defaultOptions: LLMRequestOptions = {};
+ api;
+ model;
+ constructor(
+ // baseURL: string,
+ apiKey: string,
+ options: LLMRequestOptions = {},
+ ) {
+ // const client = new OpenAI({ baseURL, apiKey });
+ const client = new GoogleGenAI({ apiKey });
+ this.api = client;
+ this.model = "gemini-2.5-flash";
+ // this.defaultOptions = {
+ // model: "gemini-2.5-flash",
+ // temperature: 0.3,
+ // maxTokens: 1000,
+ // ...options,
+ // ...options
+ // };
+ }
+
+ async categorizeBookmark(
+ request: CategorizationRequest,
+ options?: LLMRequestOptions,
+ ): Promise<CategorizationResponse> {
+ const mergedOptions = { ...this.defaultOptions, ...options };
+
+ const prompt = this.buildCategorizationPrompt(request);
+ const media = [];
+ const allPics = request.bookmark.media.pics;
+ if (request.bookmark.media.video.thumb)
+ allPics.push(request.bookmark.media.video.thumb);
+ for (const pic of allPics) {
+ const imgdata = await fetch(pic);
+ const imageArrayBuffer = await imgdata.arrayBuffer();
+ const base64ImageData = Buffer.from(imageArrayBuffer).toString("base64");
+ const mimeType = imgdata.headers.get("content-type") || "image/jpeg";
+ const con = { inlineData: { mimeType, data: base64ImageData } };
+ media.push(con);
+ }
+
+ const contents = media
+ ? createUserContent([
+ prompt[0],
+ `**Media**: The bookmark included the following images:`,
+ ...media,
+ prompt[1],
+ ])
+ : [prompt[0] + "\n\n" + prompt[1]];
+
+ try {
+ const response = await this.api.models.generateContent({
+ model: this.model,
+ contents,
+ config: { systemInstruction: CATEGORIZATION_PROMPT },
+ });
+ console.log("llm res", response);
+
+ if (!response.text) {
+ throw new Error("No response content from LLM");
+ }
+
+ return this.parseCategorizationResponse(response.text);
+ } catch (error) {
+ console.error("Error in LLM categorization:", error);
+ throw error;
+ }
+ }
+
+ private buildCategorizationPrompt(
+ request: CategorizationRequest,
+ ): [string, string] {
+ const { bookmark, userCategories } = request;
+
+ const prompt: [string, string] = [
+ `
+Analyze the following bookmark and provide your categorization suggestions.
+
+## Bookmark Details
+**Text**: "${bookmark.text}"
+**Language**: ${bookmark.language}
+**Author**: ${bookmark.author.name} (@${bookmark.author.username})
+**Hashtags**: ${bookmark.hashtags.join(", ") || "None"}
+**URLs**: ${bookmark.urls.map((u) => u.expandedUrl).join(", ") || "None"}`,
+
+ `## User Categories
+${userCategories.map((cat) => `- ${cat.name}: ${cat.criteria}`).join("\n")}
+Please provide your categorization analysis in the requested JSON format.
+`,
+ ];
+
+ return prompt;
+ }
+
+ private parseCategorizationResponse(content: string): CategorizationResponse {
+ try {
+ // Try to extract JSON from the response
+ const jsonMatch = content.match(/\{[\s\S]*\}/);
+ if (!jsonMatch) {
+ throw new Error("No JSON found in response");
+ }
+
+ const parsed = JSON.parse(jsonMatch[0]);
+
+ // Validate response structure
+ if (
+ !parsed.suggestedCategories ||
+ !Array.isArray(parsed.suggestedCategories)
+ ) {
+ throw new Error(
+ "Invalid response structure: missing suggestedCategories",
+ );
+ }
+
+ return {
+ suggestedCategories: parsed.suggestedCategories.map((s: any) => ({
+ categories: Array.isArray(s.categories)
+ ? s.categories
+ : [s.categories],
+ confidence: typeof s.confidence === "number" ? s.confidence : 0.5,
+ reasoning: s.reasoning || "No reasoning provided",
+ })),
+ newCategories: Array.isArray(parsed.newCategories)
+ ? parsed.newCategories
+ : [],
+ summary: parsed.summary || "No summary provided",
+ keyTopics: Array.isArray(parsed.keyTopics) ? parsed.keyTopics : [],
+ };
+ } catch (error) {
+ console.error("Error parsing LLM response:", error);
+
+ // Return fallback response
+ return {
+ suggestedCategories: [
+ {
+ categories: ["Uncategorized"],
+ confidence: 0.1,
+ reasoning: "Failed to parse LLM response",
+ },
+ ],
+ newCategories: [],
+ summary: "Analysis failed",
+ keyTopics: [],
+ };
+ }
+ }
+
+ // Generic LLM request method for future use
+ // async sendPrompt(
+ // prompt: string,
+ // options?: LLMRequestOptions,
+ // ): Promise<string> {
+ // const mergedOptions = { ...this.defaultOptions, ...options };
+
+ // try {
+ // const response = await fetch(`${this.baseUrl}/v1/messages`, {
+ // method: "POST",
+ // headers: {
+ // "Content-Type": "application/json",
+ // Authorization: `Bearer ${this.apiKey}`,
+ // "anthropic-version": "2023-06-01",
+ // },
+ // body: JSON.stringify({
+ // model: mergedOptions.model,
+ // max_tokens: mergedOptions.maxTokens,
+ // temperature: mergedOptions.temperature,
+ // messages: [
+ // {
+ // role: "user",
+ // content: prompt,
+ // },
+ // ],
+ // }),
+ // });
+
+ // if (!response.ok) {
+ // throw new Error(
+ // `LLM API request failed: ${response.status} ${response.statusText}`,
+ // );
+ // }
+
+ // const data = await response.json();
+ // return data.content[0]?.text || "";
+ // } catch (error) {
+ // console.error("Error in generic LLM request:", error);
+ // throw error;
+ // }
+ // }
+}