chore: add benchmark options (#690)
louis-jan authored Jun 13, 2024
1 parent 65da850 commit de351c6
Showing 3 changed files with 55 additions and 10 deletions.
34 changes: 31 additions & 3 deletions cortex-js/src/infrastructure/commanders/benchmark.command.ts
@@ -1,5 +1,6 @@
-import { CommandRunner, SubCommand } from 'nest-commander';
+import { CommandRunner, SubCommand, Option } from 'nest-commander';
 import { BenchmarkCliUsecases } from './usecases/benchmark.cli.usecases';
+import { BenchmarkConfig } from './types/benchmark-config.interface';
 
 @SubCommand({
   name: 'benchmark',
@@ -12,7 +13,34 @@ export class BenchmarkCommand extends CommandRunner {
     super();
   }
 
-  async run(): Promise<void> {
-    return this.benchmarkUsecases.benchmark();
+  async run(
+    _input: string[],
+    options?: Partial<BenchmarkConfig>,
+  ): Promise<void> {
+    return this.benchmarkUsecases.benchmark(options ?? {});
   }
+
+  @Option({
+    flags: '-n, --num_rounds <num_rounds>',
+    description: 'Number of rounds to run the benchmark',
+  })
+  parseRounds(value: number) {
+    return value;
+  }
+
+  @Option({
+    flags: '-c, --concurrency <concurrency>',
+    description: 'Number of concurrent requests to run the benchmark',
+  })
+  parseConcurrency(value: number) {
+    return value;
+  }
+
+  @Option({
+    flags: '-o, --output <output>',
+    description: 'Output format for the benchmark results. json or table',
+  })
+  parseOutput(value: string) {
+    return value;
+  }
 }
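Taken together, the three @Option parsers above expose num_rounds, concurrency, and output as CLI flags. A usage sketch, assuming the CLI entry point is `cortex` (a guess based on the `cortex serve` spawn in the next file):

  # long flags
  cortex benchmark --num_rounds 5 --concurrency 2 --output table
  # short flags
  cortex benchmark -n 5 -c 2 -o json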
27 changes: 22 additions & 5 deletions cortex-js/src/infrastructure/commanders/usecases/benchmark.cli.usecases.ts
@@ -28,9 +28,12 @@ export class BenchmarkCliUsecases {
   /**
    * Benchmark and analyze the performance of a specific AI model using a variety of system resources
    */
-  async benchmark() {
+  async benchmark(options: Partial<BenchmarkConfig>) {
     return this.getBenchmarkConfig().then((config) => {
-      this.config = config;
+      this.config = {
+        ...config,
+        ...options,
+      };
 
       // TODO: Using OpenAI client or Cortex client to benchmark?
       this.openai = new OpenAI({
@@ -41,6 +44,7 @@
 
       const serveProcess = spawn('cortex', ['serve'], {
         detached: false,
+        shell: process.platform == 'win32',
       });
 
       return this.cortexUsecases
@@ -261,8 +265,21 @@
     fs.writeFileSync(outputFilePath, JSON.stringify(output, null, 2));
     console.log(`Benchmark results and metrics saved to ${outputFilePath}`);
 
-    console.log(
-      inspect(output, { showHidden: false, depth: null, colors: true }),
-    );
+    if (this.config.output === 'table') {
+      console.log('Results:');
+      output.results.forEach((round) => {
+        console.log('Round ' + round.round + ':');
+        console.table(round.results);
+      });
+      console.log('Metrics:');
+      console.table(output.metrics);
+    } else
+      console.log(
+        inspect(output, {
+          showHidden: false,
+          depth: null,
+          colors: true,
+        }),
+      );
   }
 }
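The merge in benchmark() above gives CLI flags precedence over the stored config: options only contains the keys the user actually passed, so spreading it last overrides exactly those fields. A minimal standalone TypeScript sketch of the behavior, with BenchmarkConfig trimmed to two fields for illustration:

  // Illustrative only: a trimmed-down BenchmarkConfig for the example.
  interface BenchmarkConfig {
    num_rounds: number;
    output: string;
  }

  const fileConfig: BenchmarkConfig = { num_rounds: 10, output: 'table' };

  // Only flags the user actually passed appear in `options`,
  // so spreading it last overrides just those fields.
  const options: Partial<BenchmarkConfig> = { num_rounds: 5 };

  const effective = { ...fileConfig, ...options };
  console.log(effective); // { num_rounds: 5, output: 'table' }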
4 changes: 2 additions & 2 deletions cortex-js/src/infrastructure/constants/benchmark.ts
@@ -26,11 +26,11 @@ export const defaultBenchmarkConfiguration: BenchmarkConfig = {
     },
   },
   prompts: {
-    min: 102,
+    min: 1024,
     max: 2048,
     samples: 10,
   },
-  output: 'json',
+  output: 'table',
   hardware: ['cpu', 'gpu', 'psu', 'chassis', 'ram'],
   concurrency: 1,
   num_rounds: 10,
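Pieced together from the fields visible in this commit, the benchmark configuration looks roughly like the sketch below. This is a reconstruction for reference, not the actual benchmark-config.interface.ts, which may define additional fields:

  // Reconstructed for illustration from the fields visible in this diff;
  // the real interface may differ.
  interface BenchmarkConfig {
    prompts: {
      min: number;     // minimum prompt size (unit assumed, e.g. tokens)
      max: number;     // maximum prompt size
      samples: number; // number of sampled prompts
    };
    output: 'json' | 'table';
    hardware: string[]; // e.g. ['cpu', 'gpu', 'psu', 'chassis', 'ram']
    concurrency: number;
    num_rounds: number;
  }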
