update summary package
v1xingyue committed Feb 15, 2025
1 parent 2b76191 commit 93a2269
Showing 7 changed files with 284 additions and 28 deletions.
21 changes: 13 additions & 8 deletions docs/posts.json
@@ -1,22 +1,27 @@
[
{
"title": "扩展 eliza : 构建你的第一个 Plugin",
"description": "本文介绍了如何构建一个 eliza 插件,包括插件的结构、Action、Service、Provider 等模块的定义和使用方法。还详细说明了如何在 runtime 中调用插件、插件之间的互相调用,以及 Provider 的调用方式。最后提供了一个 plugin-sui 的演示和相关的视频教程链接",
"description": "本文介绍了如何构建一个 eliza 插件,包括插件的结构、Action、Service、Provider 等模块的定义和使用方法,以及如何在 runtime 中调用和插件之间的互相调用",
"id": "first-plugin-action"
},
{
"title": "Eliza 原理解析(上):Agent 工具调用",
"description": "本文详细介绍了Eliza框架中Provider和Action的运行原理。Provider用于解决AI获取特定领域信息不准确的问题,通过封装代码获取数据并以自然语言形式返回给AI Model。Action则用于封装复杂的链上操作,帮助AI理解并执行任务。文章还探讨了如何让AI理解其调用的Action,并通过callback函数将执行结果加入AI的memory中。",
"id": "eliza-baiscs-call-tools"
"title": "Run Eliza With Tee",
"description": "介绍TEE 的相关知识,以及如何使用TEE 运行Eliza 的 AI Agent.",
"id": "tee-and-eliza"
},
{
"title": "快速开始,构建一个 Telegram 的 Chatbot",
"description": "本指南详细介绍了如何从零开始构建一个 Telegram 聊天机器人。内容包括环境安装、NodeJS 安装、代码下载、启动 eliza、配置 telegram bot、选择大模型服务、配置 character.json、启动 telegram bot 测试以及常见问题解决。此外,还提供了视频教程链接,帮助用户更直观地理解和操作",
"description": "本指南详细介绍了如何从零开始构建一个 Telegram 聊天机器人。内容包括环境安装、NodeJS 配置、代码下载与构建、Telegram Bot 的申请与配置、大模型服务的选择与配置、以及常见问题的解决方法。此外,还提供了视频教程链接,帮助开发者更直观地理解和操作",
"id": "first-telegram-bot"
},
{
"title": "Run Eliza With Tee",
"description": "介绍TEE 的相关知识,以及如何使用TEE 运行Eliza 的 AI Agent。作者 Twitter: https://x.com/zhou49。包含视频直播链接。",
"id": "tee-and-eliza"
"title": "Eliza 原理解析(上):Agent 工具调用",
"description": "本文详细介绍了Eliza框架中Provider和Action的运行原理。Provider用于解决AI获取特定领域信息不准确的问题,而Action则封装了AI的执行代码,帮助AI理解并执行复杂任务。文章还探讨了AI如何通过Provider获取信息、如何调用Action以及如何让AI理解其调用的Action。",
"id": "eliza-baiscs-call-tools"
},
{
"title": "Eliza knowlege & memory",
"description": "Eliza 中的存储结构包括 Character、AgentRuntime、MemoryManagers 等组件,通过数据库适配器进行数据存储。缓存管理器 ICacheManager 提供 get、set、delete 接口,支持 redis、database、filesystem 三种存储方式。数据库支持 sqlite 和 postgres,作为数据持久层。知识碎片通过 uuid 生成唯一 id,处理模块位于 src/core/knowlege.ts,写入分两步进行。memoryManagers 包含多个管理器,如 messageManager、knowledgeManager 等。memory 的基本单位包括 id、userId、agentId 等字段。使用举例包括 memory 的写入和读取,以及 knowledge 的获取和写入。",
"id": "custom-agent-withmemory"
}
]
3 changes: 3 additions & 0 deletions packages/tools/package.json
@@ -7,6 +7,7 @@
"scripts": {
"publish": "npm publish -r --access public",
"summary": "ts-node src/summary.ts",
"test": "ts-node src/test.ts",
"build": "tsc --outDir dist"
},
"bin": {
@@ -27,6 +28,8 @@
"typescript": "^5.2.2"
},
"dependencies": {
"@ai-sdk/openai": "^1.1.11",
"ai": "^4.1.39",
"axios": "^1.7.9",
"commander": "^13.0.0",
"dotenv": "^16.4.7"
36 changes: 18 additions & 18 deletions packages/tools/src/openai.ts
@@ -1,3 +1,7 @@
import { generateText } from "ai";
import { openai } from "@ai-sdk/openai";
import { newOpenAI } from "./tools";

const renderTemplate = (
template: string,
state: Record<string, any>
@@ -18,34 +22,30 @@ const renderTemplate = (
});
};

export const generateText = async (
export const myGenerateText = async (
state: { body: string },
template: string
) => {
console.log(process.env.REDPILL_API_KEY);
// console.log(process.env.REDPILL_API_KEY);
const content = renderTemplate(template, state);
// console.log("send : ", content);

const response = await fetch("https://api.deepseek.com/chat/completions", {
method: "POST",
headers: {
"Content-Type": "application/json",
Authorization: `Bearer ${process.env.DEEPSEEK_API_KEY}`,
},
body: JSON.stringify({
model: process.env.MODEL,
messages: [{ role: "user", content: content }],
}),
});
const model = process.env.MODEL;
if (!model) {
throw new Error("MODEL is not set");
}

console.log("response : ", response.status);
const openai = newOpenAI();
const response = await generateText({
model: openai(model),
system: "You are a friendly assistant!",
prompt: content,
});

const data = (await response.json()) as any;
return data.choices[0].message.content;
return response.text;
};

export const summaryContent = async (content: string) => {
const response = await generateText(
const response = await myGenerateText(
{
body: content,
},
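A minimal usage sketch of the reworked helper, not part of this commit: it assumes the package is run with MODEL, OPENAI_API_KEY, and OPENAI_BASE_URL available via .env, and that the summary template elided above stays as committed.

import dotenv from "dotenv";
import { summaryContent } from "./openai";

// Load MODEL / OPENAI_API_KEY / OPENAI_BASE_URL before the ai SDK wrapper is called.
dotenv.config();

summaryContent("Eliza is a framework for building AI agents ...")
  .then((summary) => console.log("summary:", summary))
  .catch((err) => console.error(err));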
4 changes: 2 additions & 2 deletions packages/tools/src/summary.ts
@@ -1,14 +1,14 @@
import fs, { writeFileSync } from "fs";
import dotenv from "dotenv";
import path from "path";
import { generateText, trimJSON } from "./openai";
import { myGenerateText, trimJSON } from "./openai";

// Load the .env from the project root
const rootEnvPath = path.join(__dirname, "..", "..", "..", ".env");
dotenv.config({ path: rootEnvPath });

const summaryPost = async (content: string) => {
const summary = await generateText(
const summary = await myGenerateText(
{
body: content,
},
24 changes: 24 additions & 0 deletions packages/tools/src/test.ts
@@ -0,0 +1,24 @@
import { generateText } from "ai";
import dotenv from "dotenv";
import { newOpenAI } from "./tools";
const main = async () => {
dotenv.config();
const model = process.env.MODEL;
if (!model) {
throw new Error("MODEL is not set");
}

const content = "你好";

const openai = newOpenAI();

const response = await generateText({
model: openai(model),
system: "You are a friendly assistant!",
prompt: content,
});

console.log(response.text);
};

main();
28 changes: 28 additions & 0 deletions packages/tools/src/tools.ts
@@ -1,6 +1,34 @@
import path from "path";
import fs from "fs";
import dotenv from "dotenv";
import axios from "axios";
import { AxiosHeaders } from "axios";
import { createOpenAI } from "@ai-sdk/openai";

export const newOpenAI = () => {
const ai = createOpenAI({
apiKey: process.env.OPENAI_API_KEY,
baseURL: process.env.OPENAI_BASE_URL,
fetch: async (input: URL | RequestInfo, init?: RequestInit | undefined) => {
const url = input.toString();
if (init?.method === "POST" && init?.body) {
console.info(`do proxy request\n ${url} \n ${init.body}`);

const headers = new AxiosHeaders();
headers.set("Content-Type", "application/json");
headers.set("Authorization", `Bearer ${process.env.OPENAI_API_KEY}`);
const response = await axios.post(url, init.body, { headers });
// Mimic the fetch Response object
return new Response(JSON.stringify(response.data), {
status: response.status,
statusText: response.statusText,
});
}
return fetch(input, init);
},
});
return ai;
};

// find .env from the current path up through parent paths, max depth is 10
export const findDotenv = () => {
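The newOpenAI helper hands createOpenAI a custom fetch: POST requests with a body are logged and proxied through axios, then wrapped back into a standard Response so the ai SDK can parse them, while all other requests fall through to the native fetch. A minimal sketch of the environment the new helpers rely on; the key names come from this diff, the check itself is only illustrative.

// Hedged sketch: env keys read by the new helpers in this commit.
// MODEL is required by myGenerateText; OPENAI_API_KEY / OPENAI_BASE_URL are read by newOpenAI.
if (!process.env.MODEL) {
  throw new Error("MODEL is not set");
}
if (!process.env.OPENAI_API_KEY) {
  console.warn("OPENAI_API_KEY is not set; requests will fail to authorize");
}
if (!process.env.OPENAI_BASE_URL) {
  console.warn("OPENAI_BASE_URL is not set; the provider's default base URL will be used");
}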