Skip to content

Commit

Permalink
feat: add fully verifiable agents with primus zktls
Browse files Browse the repository at this point in the history
  • Loading branch information
谢翔 authored and 谢翔 committed Jan 16, 2025
1 parent 7918d58 commit 3ea33e4
Show file tree
Hide file tree
Showing 19 changed files with 1,094 additions and 73 deletions.
1 change: 1 addition & 0 deletions agent/package.json
Original file line number Diff line number Diff line change
Expand Up @@ -61,6 +61,7 @@
"@elizaos/plugin-fuel": "workspace:*",
"@elizaos/plugin-avalanche": "workspace:*",
"@elizaos/plugin-web-search": "workspace:*",
"@elizaos/plugin-primus": "workspace:*",
"readline": "1.3.0",
"ws": "8.18.0",
"yargs": "17.7.2"
Expand Down
17 changes: 17 additions & 0 deletions agent/src/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,7 @@ import { LensAgentClient } from "@elizaos/client-lens";
import { SlackClientInterface } from "@elizaos/client-slack";
import { TelegramClientInterface } from "@elizaos/client-telegram";
import { TwitterClientInterface } from "@elizaos/client-twitter";
import { PrimusAdapter } from "@elizaos/plugin-primus";
import {
AgentRuntime,
CacheManager,
Expand Down Expand Up @@ -516,6 +517,22 @@ export async function createAgent(
);
}

let verifiableInferenceAdapter;
if (
process.env.PRIMUS_APP_ID &&
process.env.PRIMUS_APP_SECRET &&
process.env.VERIFIABLE_INFERENCE_ENABLED === "true"
){
verifiableInferenceAdapter = new PrimusAdapter({
appId: process.env.PRIMUS_APP_ID,
appSecret: process.env.PRIMUS_APP_SECRET,
attMode: "proxytls",
modelProvider: character.modelProvider,
token,
});
elizaLogger.log("Verifiable inference primus adapter initialized");
}

return new AgentRuntime({
databaseAdapter: db,
token,
Expand Down
50 changes: 50 additions & 0 deletions packages/core/src/generation.ts
Original file line number Diff line number Diff line change
Expand Up @@ -37,6 +37,9 @@ import {
ServiceType,
SearchResponse,
ActionResponse,
IVerifiableInferenceAdapter,
VerifiableInferenceOptions,
VerifiableInferenceResult,
TelemetrySettings,
TokenizerType,
} from "./types.ts";
Expand Down Expand Up @@ -176,6 +179,8 @@ export async function generateText({
maxSteps = 1,
stop,
customSystemPrompt,
verifiableInference = process.env.VERIFIABLE_INFERENCE_ENABLED === "true",
verifiableInferenceOptions,
}: {
runtime: IAgentRuntime;
context: string;
Expand All @@ -185,6 +190,9 @@ export async function generateText({
maxSteps?: number;
stop?: string[];
customSystemPrompt?: string;
verifiableInference?: boolean;
verifiableInferenceAdapter?: IVerifiableInferenceAdapter;
verifiableInferenceOptions?: VerifiableInferenceOptions;
}): Promise<string> {
if (!context) {
console.error("generateText context is empty");
Expand All @@ -196,8 +204,38 @@ export async function generateText({
elizaLogger.info("Generating text with options:", {
modelProvider: runtime.modelProvider,
model: modelClass,
verifiableInference,
});

elizaLogger.log("Using provider:", runtime.modelProvider);
// If verifiable inference is requested and adapter is provided, use it
if (verifiableInference && runtime.verifiableInferenceAdapter) {
elizaLogger.log(
"Using verifiable inference adapter:",
runtime.verifiableInferenceAdapter
);
try {
const result: VerifiableInferenceResult =
await runtime.verifiableInferenceAdapter.generateText(
context,
modelClass,
verifiableInferenceOptions
);
elizaLogger.log("Verifiable inference result:", result);
// Verify the proof
const isValid =
await runtime.verifiableInferenceAdapter.verifyProof(result);
if (!isValid) {
throw new Error("Failed to verify inference proof");
}

return result.text;
} catch (error) {
elizaLogger.error("Error in verifiable inference:", error);
throw error;
}
}

const provider = runtime.modelProvider;
const endpoint =
runtime.character.modelEndpointOverride || models[provider].endpoint;
Expand Down Expand Up @@ -1522,6 +1560,9 @@ export interface GenerationOptions {
stop?: string[];
mode?: "auto" | "json" | "tool";
experimental_providerMetadata?: Record<string, unknown>;
verifiableInference?: boolean;
verifiableInferenceAdapter?: IVerifiableInferenceAdapter;
verifiableInferenceOptions?: VerifiableInferenceOptions;
}

/**
Expand Down Expand Up @@ -1553,6 +1594,9 @@ export const generateObject = async ({
schemaDescription,
stop,
mode = "json",
verifiableInference = false,
verifiableInferenceAdapter,
verifiableInferenceOptions,
}: GenerationOptions): Promise<GenerateObjectResult<unknown>> => {
if (!context) {
const errorMessage = "generateObject context is empty";
Expand Down Expand Up @@ -1596,6 +1640,9 @@ export const generateObject = async ({
runtime,
context,
modelClass,
verifiableInference,
verifiableInferenceAdapter,
verifiableInferenceOptions,
});

return response;
Expand All @@ -1621,6 +1668,9 @@ interface ProviderOptions {
modelOptions: ModelSettings;
modelClass: string;
context: string;
verifiableInference?: boolean;
verifiableInferenceAdapter?: IVerifiableInferenceAdapter;
verifiableInferenceOptions?: VerifiableInferenceOptions;
}

/**
Expand Down
15 changes: 15 additions & 0 deletions packages/core/src/runtime.ts
Original file line number Diff line number Diff line change
Expand Up @@ -31,6 +31,7 @@ import {
ICacheManager,
IDatabaseAdapter,
IMemoryManager,
IVerifiableInferenceAdapter,
KnowledgeItem,
ModelClass,
ModelProviderName,
Expand Down Expand Up @@ -150,6 +151,8 @@ export class AgentRuntime implements IAgentRuntime {
cacheManager: ICacheManager;
clients: Record<string, any>;

verifiableInferenceAdapter?: IVerifiableInferenceAdapter;

registerMemoryManager(manager: IMemoryManager): void {
if (!manager.tableName) {
throw new Error("Memory manager must have a tableName");
Expand Down Expand Up @@ -231,6 +234,7 @@ export class AgentRuntime implements IAgentRuntime {
speechModelPath?: string;
cacheManager: ICacheManager;
logging?: boolean;
verifiableInferenceAdapter?: IVerifiableInferenceAdapter;
}) {
elizaLogger.info("Initializing AgentRuntime with options:", {
character: opts.character?.name,
Expand Down Expand Up @@ -390,6 +394,8 @@ export class AgentRuntime implements IAgentRuntime {
(opts.evaluators ?? []).forEach((evaluator: Evaluator) => {
this.registerEvaluator(evaluator);
});

this.verifiableInferenceAdapter = opts.verifiableInferenceAdapter;
}

async initialize() {
Expand Down Expand Up @@ -661,6 +667,7 @@ export class AgentRuntime implements IAgentRuntime {
runtime: this,
context,
modelClass: ModelClass.SMALL,
verifiableInferenceAdapter: this.verifiableInferenceAdapter,
});

const evaluators = parseJsonArrayFromText(
Expand Down Expand Up @@ -1293,6 +1300,14 @@ Text: ${attachment.text}
attachments: formattedAttachments,
} as State;
}

    /**
     * Returns the verifiable inference adapter currently attached to this
     * runtime, or `undefined` when none was supplied at construction or via
     * {@link setVerifiableInferenceAdapter}.
     */
    getVerifiableInferenceAdapter(): IVerifiableInferenceAdapter | undefined {
        return this.verifiableInferenceAdapter;
    }

    /**
     * Replaces the runtime's verifiable inference adapter. Subsequent text
     * generation that checks `runtime.verifiableInferenceAdapter` will route
     * through the new adapter.
     * @param adapter adapter implementation to install (e.g. PrimusAdapter)
     */
    setVerifiableInferenceAdapter(adapter: IVerifiableInferenceAdapter): void {
        this.verifiableInferenceAdapter = adapter;
    }
}

const formatKnowledge = (knowledge: KnowledgeItem[]) => {
Expand Down
64 changes: 64 additions & 0 deletions packages/core/src/types.ts
Original file line number Diff line number Diff line change
Expand Up @@ -1125,6 +1125,8 @@ export interface IAgentRuntime {
// but I think the real solution is forthcoming as a base client interface
clients: Record<string, any>;

verifiableInferenceAdapter?: IVerifiableInferenceAdapter | null;

initialize(): Promise<void>;

registerMemoryManager(manager: IMemoryManager): void;
Expand Down Expand Up @@ -1318,6 +1320,68 @@ export interface ISlackService extends Service {
client: any;
}

/**
 * Available verifiable inference providers.
 * String enum so serialized/logged values stay stable and human-readable.
 */
export enum VerifiableInferenceProvider {
    PRIMUS = "primus",
}

/**
 * Options for a verifiable inference request. All fields are optional;
 * adapters fall back to their own defaults when a field is omitted.
 */
export interface VerifiableInferenceOptions {
    /** Custom endpoint URL overriding the adapter's default */
    endpoint?: string;
    /** Custom HTTP headers to send with the request */
    headers?: Record<string, string>;
    /** Provider-specific options (opaque to core; interpreted by the adapter) */
    providerOptions?: Record<string, unknown>;
}

/**
 * Result of a verifiable inference request, as produced by
 * {@link IVerifiableInferenceAdapter.generateText} and consumed by
 * {@link IVerifiableInferenceAdapter.verifyProof}.
 */
export interface VerifiableInferenceResult {
    /** Generated text */
    text: string;
    /** Cryptographic proof of the inference; shape is provider-specific. */
    // NOTE(review): `any` here disables checking for all proof consumers;
    // `unknown` would force narrowing but breaks existing readers — confirm
    // before tightening.
    proof: any;
    /** Proof id */
    id?: string;
    /** Provider information */
    provider: VerifiableInferenceProvider;
    /** Timestamp — presumably epoch milliseconds; confirm with adapter impl */
    timestamp: number;
}

/**
 * Interface for verifiable inference adapters. An adapter performs model
 * inference through a provider (see {@link VerifiableInferenceProvider}) and
 * returns a proof that can later be verified independently of the response.
 */
export interface IVerifiableInferenceAdapter {
    /** Adapter configuration (provider credentials, attestation mode, …). */
    // NOTE(review): `any` — prefer a concrete options type or `unknown`;
    // left as-is to avoid breaking existing implementations.
    options: any;
    /**
     * Generate text with verifiable proof
     * @param context The input text/prompt
     * @param modelClass The model class/name to use
     * @param options Additional provider-specific options
     * @returns Promise containing the generated text and proof data
     */
    generateText(
        context: string,
        modelClass: string,
        options?: VerifiableInferenceOptions
    ): Promise<VerifiableInferenceResult>;

    /**
     * Verify the proof of a generated response
     * @param result The result containing response and proof to verify
     * @returns Promise indicating if the proof is valid
     */
    verifyProof(result: VerifiableInferenceResult): Promise<boolean>;
}


export enum TokenizerType {
Auto = "auto",
TikToken = "tiktoken",
Expand Down
6 changes: 6 additions & 0 deletions packages/plugin-primus/.npmignore
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
*

!dist/**
!package.json
!readme.md
!tsup.config.ts
Loading

0 comments on commit 3ea33e4

Please sign in to comment.