diff --git a/.github/ISSUE_TEMPLATE/benchmark_harness.md b/.github/ISSUE_TEMPLATE/benchmark_harness.md new file mode 100644 index 000000000..a530c52ef --- /dev/null +++ b/.github/ISSUE_TEMPLATE/benchmark_harness.md @@ -0,0 +1,5 @@ +--- +name: "package:benchmark_harness" +about: "Create a bug or file a feature request against package:benchmark_harness." +labels: "package:benchmark_harness" +--- \ No newline at end of file diff --git a/.github/labeler.yml b/.github/labeler.yml index 024b56bfe..8d48ebdfa 100644 --- a/.github/labeler.yml +++ b/.github/labeler.yml @@ -8,6 +8,10 @@ - changed-files: - any-glob-to-any-file: 'pkgs/bazel_worker/**' +'package:benchmark_harness': + - changed-files: + - any-glob-to-any-file: 'pkgs/benchmark_harness/**' + 'package:boolean_selector': - changed-files: - any-glob-to-any-file: 'pkgs/boolean_selector/**' diff --git a/.github/workflows/benchmark_harness.yaml b/.github/workflows/benchmark_harness.yaml new file mode 100644 index 000000000..c3b776439 --- /dev/null +++ b/.github/workflows/benchmark_harness.yaml @@ -0,0 +1,74 @@ +name: package:benchmark_harness + +on: + # Run on PRs and pushes to the default branch. + push: + branches: [ main ] + paths: + - '.github/workflows/benchmark_harness.yaml' + - 'pkgs/benchmark_harness/**' + pull_request: + branches: [ main ] + paths: + - '.github/workflows/benchmark_harness.yaml' + - 'pkgs/benchmark_harness/**' + schedule: + - cron: "0 0 * * 0" + +env: + PUB_ENVIRONMENT: bot.github + +defaults: + run: + working-directory: pkgs/benchmark_harness/ + +jobs: + # Check code formatting and static analysis on a single OS (linux) + # against Dart dev. 
+ analyze: + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + sdk: [dev] + steps: + - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 + - uses: dart-lang/setup-dart@0a8a0fc875eb934c15d08629302413c671d3f672 + with: + sdk: ${{ matrix.sdk }} + - id: install + name: Install dependencies + run: dart pub get + - name: Check formatting + run: dart format --output=none --set-exit-if-changed . + if: always() && steps.install.outcome == 'success' + - name: Analyze code + run: dart analyze --fatal-infos + if: always() && steps.install.outcome == 'success' + + # Run tests on a matrix consisting of two dimensions: + # 1. OS: ubuntu-latest, (macos-latest, windows-latest) + # 2. release channel: dev + test: + needs: analyze + runs-on: ${{ matrix.os }} + strategy: + fail-fast: false + matrix: + # Add macos-latest and/or windows-latest if relevant for this package. + os: [ubuntu-latest] + sdk: [3.2, dev] + steps: + - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 + - uses: dart-lang/setup-dart@0a8a0fc875eb934c15d08629302413c671d3f672 + with: + sdk: ${{ matrix.sdk }} + - id: install + name: Install dependencies + run: dart pub get + - name: Run VM tests + run: dart test --platform vm + if: always() && steps.install.outcome == 'success' + - name: Run Chrome tests + run: dart test --platform chrome + if: always() && steps.install.outcome == 'success' diff --git a/README.md b/README.md index 5e3b8c124..c4f28502f 100644 --- a/README.md +++ b/README.md @@ -15,6 +15,7 @@ don't naturally belong to other topic monorepos (like | Package | Description | Version | | --- | --- | --- | | [bazel_worker](pkgs/bazel_worker/) | Protocol and utilities to implement or invoke persistent bazel workers. | [![pub package](https://img.shields.io/pub/v/bazel_worker.svg)](https://pub.dev/packages/bazel_worker) | +| [benchmark_harness](pkgs/benchmark_harness/) | The official Dart project benchmark harness. 
| [![pub package](https://img.shields.io/pub/v/benchmark_harness.svg)](https://pub.dev/packages/benchmark_harness) | | [boolean_selector](pkgs/boolean_selector/) | A flexible syntax for boolean expressions, based on a simplified version of Dart's expression syntax. | [![pub package](https://img.shields.io/pub/v/boolean_selector.svg)](https://pub.dev/packages/boolean_selector) | | [browser_launcher](pkgs/browser_launcher/) | Provides a standardized way to launch web browsers for testing and tools. | [![pub package](https://img.shields.io/pub/v/browser_launcher.svg)](https://pub.dev/packages/browser_launcher) | | [cli_config](pkgs/cli_config/) | A library to take config values from configuration files, CLI arguments, and environment variables. | [![pub package](https://img.shields.io/pub/v/cli_config.svg)](https://pub.dev/packages/cli_config) | diff --git a/pkgs/benchmark_harness/.gitignore b/pkgs/benchmark_harness/.gitignore new file mode 100644 index 000000000..2afa93d22 --- /dev/null +++ b/pkgs/benchmark_harness/.gitignore @@ -0,0 +1,4 @@ +.dart_tool +.packages +.pub +pubspec.lock diff --git a/pkgs/benchmark_harness/CHANGELOG.md b/pkgs/benchmark_harness/CHANGELOG.md new file mode 100644 index 000000000..fceecb830 --- /dev/null +++ b/pkgs/benchmark_harness/CHANGELOG.md @@ -0,0 +1,50 @@ +## 2.3.1 + +- Move to `dart-lang/tools` monorepo. + +## 2.3.0 + +- Require Dart 3.2. +- Add ScoreEmitterV2 interface, documented with the intention to change +ScoreEmitter interface to match it in the next major release, + a breaking change. +- Add `PerfBenchmarkBase` class which runs the 'perf stat' command from +linux-tools on a benchmark and reports metrics from the hardware +performance counters and the iteration count, as well as the run time +measurement reported by `BenchmarkBase`. + +## 2.2.2 + +- Added package topics to the pubspec file. +- Require Dart 2.19. 
+ +## 2.2.1 + +- Improve convergence speed of `BenchmarkBase` measuring algorithm by allowing +some degree of measuring jitter. + +## 2.2.0 + +- Change measuring algorithm in `BenchmarkBase` to avoid calling stopwatch +methods repeatedly in the measuring loop. This makes measurement work better +for `run` methods which are small themselves. + +## 2.1.0 + +- Add AsyncBenchmarkBase. + +## 2.0.0 + +- Stable null safety release. + +## 2.0.0-nullsafety.0 + +- Opt in to null safety. + +## 1.0.6 + +- Require at least Dart 2.1. + +## 1.0.5 + +- Updates to support Dart 2. diff --git a/pkgs/benchmark_harness/LICENSE b/pkgs/benchmark_harness/LICENSE new file mode 100644 index 000000000..db5bf4661 --- /dev/null +++ b/pkgs/benchmark_harness/LICENSE @@ -0,0 +1,26 @@ +Copyright 2021, the Dart project authors. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + * Neither the name of Google LLC nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file diff --git a/pkgs/benchmark_harness/README.md b/pkgs/benchmark_harness/README.md new file mode 100644 index 000000000..f156f2336 --- /dev/null +++ b/pkgs/benchmark_harness/README.md @@ -0,0 +1,116 @@ +[![Build Status](https://github.com/dart-lang/tools/actions/workflows/benchmark_harness.yaml/badge.svg)](https://github.com/dart-lang/tools/actions/workflows/benchmark_harness.yaml) +[![pub package](https://img.shields.io/pub/v/benchmark_harness.svg)](https://pub.dev/packages/benchmark_harness) +[![package publisher](https://img.shields.io/pub/publisher/benchmark_harness.svg)](https://pub.dev/packages/benchmark_harness/publisher) + +The Dart project benchmark harness is the recommended starting point when +building a benchmark for Dart. + +## Interpreting Results + +By default, the reported runtime in `BenchmarkBase` is not for a single call to +`run()`, but for the average time it takes to call `run()` __10 times__ for +legacy reasons. The benchmark harness executes a 10-call timing loop repeatedly +until 2 seconds have elapsed; the reported result is the average of the runtimes +for each loop. This behavior will change in a future major version. 
+Benchmarks extending `BenchmarkBase` can opt into reporting the average time +to call `run()` once by overriding the `exercise` method: + +```dart + @override + void exercise() => run(); +``` + +`AsyncBenchmarkBase` already reports the average time to call `run()` __once__. + +## Comparing Results + +If you are running the same benchmark, on the same machine, running the same OS, +the reported run times can be carefully compared across runs. +Carefully because there are a variety of factors which +could cause error in the run time, for example, the load from +other applications running on your machine could alter the result. + +Comparing the run time of different benchmarks is not recommended. +In other words, don't compare apples with oranges. + +## Features + +* `BenchmarkBase` class that all new benchmarks should `extend`. +* `AsyncBenchmarkBase` for asynchronous benchmarks. +* Template benchmark that you can copy and paste when building new benchmarks. + +## Getting Started + +1\. Add the following to your project's **pubspec.yaml** + +```yaml +dependencies: + benchmark_harness: any +``` + +2\. Install pub packages + +```sh +dart pub get +``` + +3\. Add the following import: + +```dart +import 'package:benchmark_harness/benchmark_harness.dart'; +``` + +4\. Create a benchmark class which inherits from `BenchmarkBase` or + `AsyncBenchmarkBase`. + +## Example + +Create a dart file in the +[`benchmark/`](https://dart.dev/tools/pub/package-layout#tests-and-benchmarks) +folder of your package. + +```dart +// Import BenchmarkBase class. +import 'package:benchmark_harness/benchmark_harness.dart'; + +// Create a new benchmark by extending BenchmarkBase +class TemplateBenchmark extends BenchmarkBase { + const TemplateBenchmark() : super('Template'); + + static void main() { + const TemplateBenchmark().report(); + } + + // The benchmark code. + @override + void run() {} + + // Not measured setup code executed prior to the benchmark runs. 
+ @override + void setup() {} + + // Not measured teardown code executed after the benchmark runs. + @override + void teardown() {} + + // To opt into reporting the time per run() instead of per 10 run() calls. + //@override + //void exercise() => run(); +} + +void main() { + // Run TemplateBenchmark + TemplateBenchmark.main(); +} +``` + +### Output + +```console +Template(RunTime): 0.1568472448997197 us. +``` + +This is the average amount of time it takes to run `run()` 10 times for +`BenchmarkBase` and once for `AsyncBenchmarkBase`. +> µs is an abbreviation for microseconds. diff --git a/pkgs/benchmark_harness/analysis_options.yaml b/pkgs/benchmark_harness/analysis_options.yaml new file mode 100644 index 000000000..9a77232bb --- /dev/null +++ b/pkgs/benchmark_harness/analysis_options.yaml @@ -0,0 +1,14 @@ +include: package:dart_flutter_team_lints/analysis_options.yaml + +analyzer: + language: + strict-inference: true + strict-casts: true + +linter: + rules: + - avoid_unused_constructor_parameters + - cancel_subscriptions + - literal_only_boolean_expressions + - no_adjacent_strings_in_list + - unnecessary_await_in_return diff --git a/pkgs/benchmark_harness/example/template.dart b/pkgs/benchmark_harness/example/template.dart new file mode 100644 index 000000000..d13a98b47 --- /dev/null +++ b/pkgs/benchmark_harness/example/template.dart @@ -0,0 +1,36 @@ +// Copyright (c) 2021, the Dart project authors. Please see the AUTHORS file +// for details. All rights reserved. Use of this source code is governed by a +// BSD-style license that can be found in the LICENSE file. + +// Import BenchmarkBase class. +import 'package:benchmark_harness/benchmark_harness.dart'; + +// Create a new benchmark by extending BenchmarkBase +class TemplateBenchmark extends BenchmarkBase { + const TemplateBenchmark() : super('Template'); + + static void main() { + const TemplateBenchmark().report(); + } + + // The benchmark code. 
+ @override + void run() {} + + // Not measured setup code executed prior to the benchmark runs. + @override + void setup() {} + + // Not measured teardown code executed after the benchmark runs. + @override + void teardown() {} + + // To opt into reporting the time per run() instead of per 10 run() calls. + //@override + //void exercise() => run(); +} + +void main() { + // Run TemplateBenchmark + TemplateBenchmark.main(); +} diff --git a/pkgs/benchmark_harness/integration_test/perf_benchmark_test.dart b/pkgs/benchmark_harness/integration_test/perf_benchmark_test.dart new file mode 100644 index 000000000..339777f28 --- /dev/null +++ b/pkgs/benchmark_harness/integration_test/perf_benchmark_test.dart @@ -0,0 +1,26 @@ +// Copyright 2024, the Dart project authors. Please see the AUTHORS file +// for details. All rights reserved. Use of this source code is governed by a +// BSD-style license that can be found in the LICENSE file. + +import 'package:benchmark_harness/perf_benchmark_harness.dart'; +import 'package:test/test.dart'; + +class PerfBenchmark extends PerfBenchmarkBase { + PerfBenchmark(super.name); + int runCount = 0; + + @override + void run() { + runCount++; + for (final i in List.filled(1000, 7)) { + runCount += i - i; + } + } +} + +void main() { + test('run is called', () async { + final benchmark = PerfBenchmark('ForLoop'); + await benchmark.reportPerf(); + }); +} diff --git a/pkgs/benchmark_harness/lib/benchmark_harness.dart b/pkgs/benchmark_harness/lib/benchmark_harness.dart new file mode 100644 index 000000000..b46a36fb4 --- /dev/null +++ b/pkgs/benchmark_harness/lib/benchmark_harness.dart @@ -0,0 +1,7 @@ +// Copyright (c) 2021, the Dart project authors. Please see the AUTHORS file +// for details. All rights reserved. Use of this source code is governed by a +// BSD-style license that can be found in the LICENSE file. 
+ +export 'src/async_benchmark_base.dart'; +export 'src/benchmark_base.dart' show BenchmarkBase; +export 'src/score_emitter.dart'; diff --git a/pkgs/benchmark_harness/lib/perf_benchmark_harness.dart b/pkgs/benchmark_harness/lib/perf_benchmark_harness.dart new file mode 100644 index 000000000..3de832939 --- /dev/null +++ b/pkgs/benchmark_harness/lib/perf_benchmark_harness.dart @@ -0,0 +1,7 @@ +// Copyright (c) 2024, the Dart project authors. Please see the AUTHORS file +// for details. All rights reserved. Use of this source code is governed by a +// BSD-style license that can be found in the LICENSE file. + +export 'src/perf_benchmark_base_stub.dart' + if (dart.library.io) 'src/perf_benchmark_base.dart'; +export 'src/score_emitter.dart'; diff --git a/pkgs/benchmark_harness/lib/src/async_benchmark_base.dart b/pkgs/benchmark_harness/lib/src/async_benchmark_base.dart new file mode 100644 index 000000000..1472ee7a4 --- /dev/null +++ b/pkgs/benchmark_harness/lib/src/async_benchmark_base.dart @@ -0,0 +1,69 @@ +// Copyright (c) 2014, the Dart project authors. Please see the AUTHORS file +// for details. All rights reserved. Use of this source code is governed by a +// BSD-style license that can be found in the LICENSE file. + +import 'score_emitter.dart'; + +class AsyncBenchmarkBase { + final String name; + final ScoreEmitter emitter; + + /// Empty constructor. + const AsyncBenchmarkBase(this.name, {this.emitter = const PrintEmitter()}); + + /// The benchmark code. + /// + /// This function is not used, if both [warmup] and [exercise] are + /// overwritten. + Future run() async {} + + /// Runs a short version of the benchmark. By default invokes [run] once. + Future warmup() async { + await run(); + } + + /// Exercises the benchmark. By default invokes [run] once. + Future exercise() async { + await run(); + } + + /// Not measured setup code executed prior to the benchmark runs. 
+ Future<void> setup() async {} + + /// Not measured teardown code executed after the benchmark runs. + Future<void> teardown() async {} + + /// Measures the score for this benchmark by executing it repeatedly until + /// time minimum has been reached. + static Future<double> measureFor( + Future<void> Function() f, int minimumMillis) async { + final minimumMicros = minimumMillis * 1000; + final watch = Stopwatch()..start(); + var iter = 0; + var elapsed = 0; + while (elapsed < minimumMicros) { + await f(); + elapsed = watch.elapsedMicroseconds; + iter++; + } + return elapsed / iter; + } + + /// Measures the score for the benchmark and returns it. + Future<double> measure() async { + await setup(); + try { + // Warmup for at least 100ms. Discard result. + await measureFor(warmup, 100); + // Run the benchmark for at least 2000ms. + return await measureFor(exercise, 2000); + } finally { + await teardown(); + } + } + + /// Run the benchmark and report results on the [emitter]. + Future<void> report() async { + emitter.emit(name, await measure()); + } +} diff --git a/pkgs/benchmark_harness/lib/src/benchmark_base.dart b/pkgs/benchmark_harness/lib/src/benchmark_base.dart new file mode 100644 index 000000000..bc874f54a --- /dev/null +++ b/pkgs/benchmark_harness/lib/src/benchmark_base.dart @@ -0,0 +1,113 @@ +// Copyright (c) 2012, the Dart project authors. Please see the AUTHORS file +// for details. All rights reserved. Use of this source code is governed by a +// BSD-style license that can be found in the LICENSE file. + +import 'dart:math' as math; + +import 'score_emitter.dart'; + +const int minimumMeasureDurationMillis = 2000; + +class BenchmarkBase { + final String name; + final ScoreEmitter emitter; + + const BenchmarkBase(this.name, {this.emitter = const PrintEmitter()}); + + /// The benchmark code. + /// + /// This function is not used, if both [warmup] and [exercise] are + /// overwritten. + void run() {} + + /// Runs a short version of the benchmark. By default invokes [run] once. 
+ void warmup() { + run(); + } + + /// Exercises the benchmark. By default invokes [run] 10 times. + void exercise() { + for (var i = 0; i < 10; i++) { + run(); + } + } + + /// Not measured setup code executed prior to the benchmark runs. + void setup() {} + + /// Not measured teardown code executed after the benchmark runs. + void teardown() {} + + /// Measures the score for this benchmark by executing it enough times + /// to reach [minimumMillis]. + /// + /// Measures the score for this benchmark by executing it repeatedly until + /// time minimum has been reached. + static double measureFor(void Function() f, int minimumMillis) => + measureForImpl(f, minimumMillis).score; + + /// Measures the score for the benchmark and returns it. + double measure() { + setup(); + // Warmup for at least 100ms. Discard result. + measureForImpl(warmup, 100); + // Run the benchmark for at least 2000ms. + var result = measureForImpl(exercise, minimumMeasureDurationMillis); + teardown(); + return result.score; + } + + void report() { + emitter.emit(name, measure()); + } +} + +/// Measures the score for this benchmark by executing it enough times +/// to reach [minimumMillis]. +Measurement measureForImpl(void Function() f, int minimumMillis) { + final minimumMicros = minimumMillis * 1000; + // If running a long measurement permit some amount of measurement jitter + // to avoid discarding results that are almost good, but not quite there. + final allowedJitter = + minimumMillis < 1000 ? 
0 : (minimumMicros * 0.1).floor(); + var iter = 2; + var totalIterations = iter; + final watch = Stopwatch()..start(); + while (true) { + watch.reset(); + for (var i = 0; i < iter; i++) { + f(); + } + final elapsed = watch.elapsedMicroseconds; + final measurement = Measurement(elapsed, iter, totalIterations); + if (measurement.elapsedMicros >= (minimumMicros - allowedJitter)) { + return measurement; + } + + iter = measurement.estimateIterationsNeededToReach( + minimumMicros: minimumMicros); + totalIterations += iter; + } +} + +class Measurement { + final int elapsedMicros; + final int iterations; + final int totalIterations; + + Measurement(this.elapsedMicros, this.iterations, this.totalIterations); + + double get score => elapsedMicros / iterations; + + int estimateIterationsNeededToReach({required int minimumMicros}) { + final elapsed = roundDownToMillisecond(elapsedMicros); + return elapsed == 0 + ? iterations * 1000 + : (iterations * math.max(minimumMicros / elapsed, 1.5)).ceil(); + } + + static int roundDownToMillisecond(int micros) => (micros ~/ 1000) * 1000; + + @override + String toString() => '$elapsedMicros in $iterations iterations'; +} diff --git a/pkgs/benchmark_harness/lib/src/perf_benchmark_base.dart b/pkgs/benchmark_harness/lib/src/perf_benchmark_base.dart new file mode 100644 index 000000000..1b7fb92a1 --- /dev/null +++ b/pkgs/benchmark_harness/lib/src/perf_benchmark_base.dart @@ -0,0 +1,135 @@ +// Copyright (c) 2024, the Dart project authors. Please see the AUTHORS file +// for details. All rights reserved. Use of this source code is governed by a +// BSD-style license that can be found in the LICENSE file. 
+ +import 'dart:async'; +import 'dart:convert'; +import 'dart:io'; + +import 'benchmark_base.dart'; +import 'score_emitter.dart'; + +class PerfBenchmarkBase extends BenchmarkBase { + late final Directory fifoDir; + late final String perfControlFifo; + late final RandomAccessFile openedFifo; + late final String perfControlAck; + late final RandomAccessFile openedAck; + late final Process perfProcess; + late final List perfProcessArgs; + + PerfBenchmarkBase(super.name, + {ScoreEmitterV2 super.emitter = const PrintEmitterV2()}); + + ScoreEmitterV2 get _emitterV2 => emitter as ScoreEmitterV2; + + Future _createFifos() async { + perfControlFifo = '${fifoDir.path}/perf_control_fifo'; + perfControlAck = '${fifoDir.path}/perf_control_ack'; + for (final path in [perfControlFifo, perfControlAck]) { + final fifoResult = await Process.run('mkfifo', [path]); + if (fifoResult.exitCode != 0) { + throw ProcessException('mkfifo', [path], + 'Cannot create fifo: ${fifoResult.stderr}', fifoResult.exitCode); + } + } + } + + Future _startPerfStat() async { + await _createFifos(); + perfProcessArgs = [ + 'stat', + '--delay=-1', + '--control=fifo:$perfControlFifo,$perfControlAck', + '-x\\t', + '--pid=$pid', + ]; + perfProcess = await Process.start('perf', perfProcessArgs); + } + + void _enablePerf() { + openedFifo = File(perfControlFifo).openSync(mode: FileMode.writeOnly); + openedAck = File(perfControlAck).openSync(); + openedFifo.writeStringSync('enable\n'); + _waitForAck(); + } + + Future _stopPerfStat(int totalIterations) async { + openedFifo.writeStringSync('disable\n'); + openedFifo.closeSync(); + _waitForAck(); + openedAck.closeSync(); + perfProcess.kill(ProcessSignal.sigint); + unawaited(perfProcess.stdout.drain()); + final lines = await perfProcess.stderr + .transform(utf8.decoder) + .transform(const LineSplitter()) + .toList(); + final exitCode = await perfProcess.exitCode; + // Exit code from perf is -SIGINT when terminated with SIGINT. 
+ if (exitCode != 0 && exitCode != -ProcessSignal.sigint.signalNumber) { + throw ProcessException( + 'perf', perfProcessArgs, lines.join('\n'), exitCode); + } + + const metrics = { + 'cycles': 'CpuCycles', + 'page-faults': 'MajorPageFaults', + }; + for (final line in lines) { + if (line.split('\t') + case [ + String counter, + _, + String event && ('cycles' || 'page-faults'), + ... + ]) { + _emitterV2.emit(name, double.parse(counter) / totalIterations, + metric: metrics[event]!); + } + } + _emitterV2.emit('$name.totalIterations', totalIterations.toDouble(), + metric: 'Count'); + } + + /// Measures the score for the benchmark and returns it. + Future measurePerf() async { + Measurement result; + setup(); + try { + fifoDir = await Directory.systemTemp.createTemp('fifo'); + try { + // Warmup for at least 100ms. Discard result. + measureForImpl(warmup, 100); + await _startPerfStat(); + try { + _enablePerf(); + // Run the benchmark for at least 2000ms. + result = measureForImpl(exercise, minimumMeasureDurationMillis); + await _stopPerfStat(result.totalIterations); + } catch (_) { + perfProcess.kill(ProcessSignal.sigkill); + rethrow; + } + } finally { + await fifoDir.delete(recursive: true); + } + } finally { + teardown(); + } + return result.score; + } + + Future reportPerf() async { + _emitterV2.emit(name, await measurePerf(), unit: 'us.'); + } + + void _waitForAck() { + // Perf writes 'ack\n\x00' to the acknowledgement fifo. + const ackLength = 'ack\n\x00'.length; + var ack = [...openedAck.readSync(ackLength)]; + while (ack.length < ackLength) { + ack.addAll(openedAck.readSync(ackLength - ack.length)); + } + } +} diff --git a/pkgs/benchmark_harness/lib/src/perf_benchmark_base_stub.dart b/pkgs/benchmark_harness/lib/src/perf_benchmark_base_stub.dart new file mode 100644 index 000000000..81aa0ea42 --- /dev/null +++ b/pkgs/benchmark_harness/lib/src/perf_benchmark_base_stub.dart @@ -0,0 +1,18 @@ +// Copyright (c) 2024, the Dart project authors. 
Please see the AUTHORS file +// for details. All rights reserved. Use of this source code is governed by a +// BSD-style license that can be found in the LICENSE file. + +import 'benchmark_base.dart'; +import 'score_emitter.dart'; + +class PerfBenchmarkBase extends BenchmarkBase { + PerfBenchmarkBase(super.name, {super.emitter = const PrintEmitter()}); + + Future measurePerf() async { + return super.measure(); + } + + Future reportPerf() async { + super.report(); + } +} diff --git a/pkgs/benchmark_harness/lib/src/score_emitter.dart b/pkgs/benchmark_harness/lib/src/score_emitter.dart new file mode 100644 index 000000000..440711861 --- /dev/null +++ b/pkgs/benchmark_harness/lib/src/score_emitter.dart @@ -0,0 +1,38 @@ +// Copyright (c) 2012, the Dart project authors. Please see the AUTHORS file +// for details. All rights reserved. Use of this source code is governed by a +// BSD-style license that can be found in the LICENSE file. + +abstract class ScoreEmitter { + void emit(String testName, double value); +} + +class PrintEmitter implements ScoreEmitter { + const PrintEmitter(); + + @override + void emit(String testName, double value) { + print('$testName(RunTime): $value us.'); + } +} + +/// New interface for [ScoreEmitter]. [ScoreEmitter] will be changed to +/// this interface in the next major version release, and this class will +/// be deprecated and removed. That release will be a breaking change. +abstract class ScoreEmitterV2 implements ScoreEmitter { + @override + void emit(String testName, double value, + {String metric = 'RunTime', String unit}); +} + +/// New implementation of [PrintEmitter] implementing the [ScoreEmitterV2] +/// interface. [PrintEmitter] will be changed to this implementation in the +/// next major version release. 
+class PrintEmitterV2 implements ScoreEmitterV2 { + const PrintEmitterV2(); + + @override + void emit(String testName, double value, + {String metric = 'RunTime', String unit = ''}) { + print(['$testName($metric):', value, if (unit.isNotEmpty) unit].join(' ')); + } +} diff --git a/pkgs/benchmark_harness/pubspec.yaml b/pkgs/benchmark_harness/pubspec.yaml new file mode 100644 index 000000000..0615c6b6a --- /dev/null +++ b/pkgs/benchmark_harness/pubspec.yaml @@ -0,0 +1,17 @@ +name: benchmark_harness +version: 2.3.1 +description: The official Dart project benchmark harness. +repository: https://github.com/dart-lang/tools/tree/main/pkgs/benchmark_harness + +topics: + - benchmarking + +environment: + sdk: ^3.2.0 + +dev_dependencies: + build_runner: ^2.0.0 + build_web_compilers: ^4.0.0 + dart_flutter_team_lints: ^3.0.0 + path: ^1.8.0 + test: ^1.16.0 diff --git a/pkgs/benchmark_harness/test/benchmark_harness_test.dart b/pkgs/benchmark_harness/test/benchmark_harness_test.dart new file mode 100644 index 000000000..7f8858485 --- /dev/null +++ b/pkgs/benchmark_harness/test/benchmark_harness_test.dart @@ -0,0 +1,47 @@ +// Copyright (c) 2021, the Dart project authors. Please see the AUTHORS file +// for details. All rights reserved. Use of this source code is governed by a +// BSD-style license that can be found in the LICENSE file. 
+ +import 'dart:async'; + +import 'package:benchmark_harness/benchmark_harness.dart'; +import 'package:test/test.dart'; + +void main() { + group('benchmark_harness', () { + test('run is called', () { + final benchmark = MockBenchmark(); + final micros = benchmark.measure(); + expect(micros, isPositive); + expect(benchmark.runCount, isPositive); + }); + test('async run is awaited', () async { + final benchmark = MockAsyncBenchmark(); + final micros = await benchmark.measure(); + expect(micros, isPositive); + expect(benchmark.runCount, isPositive); + }); + }); +} + +class MockBenchmark extends BenchmarkBase { + int runCount = 0; + + MockBenchmark() : super('mock benchmark'); + + @override + void run() { + runCount++; + } +} + +class MockAsyncBenchmark extends AsyncBenchmarkBase { + int runCount = 0; + MockAsyncBenchmark() : super('mock benchmark'); + + @override + Future run() async { + await Future.delayed(Duration.zero); + runCount++; + } +} diff --git a/pkgs/benchmark_harness/test/result_emitter_test.dart b/pkgs/benchmark_harness/test/result_emitter_test.dart new file mode 100644 index 000000000..e2cd1eaa9 --- /dev/null +++ b/pkgs/benchmark_harness/test/result_emitter_test.dart @@ -0,0 +1,51 @@ +// Copyright (c) 2021, the Dart project authors. Please see the AUTHORS file +// for details. All rights reserved. Use of this source code is governed by a +// BSD-style license that can be found in the LICENSE file. + +import 'package:benchmark_harness/benchmark_harness.dart'; +import 'package:test/test.dart'; + +void main() { + benchmarkHarnessTest(); +} + +class MockResultEmitter extends ScoreEmitter { + int emitCount = 0; + + @override + void emit(String name, double value) { + emitCount++; + } +} + +// Create a new benchmark which has an emitter. 
+class BenchmarkWithResultEmitter extends BenchmarkBase { + const BenchmarkWithResultEmitter(ScoreEmitter emitter) + : super('Template', emitter: emitter); + + @override + void run() {} + + @override + void setup() {} + + @override + void teardown() {} +} + +void benchmarkHarnessTest() { + MockResultEmitter createMockEmitter() { + var emitter = MockResultEmitter(); + return emitter; + } + + group('ResultEmitter', () { + test('should be called when emitter is provided', () { + var emitter = createMockEmitter(); + var testBenchmark = BenchmarkWithResultEmitter(emitter); + testBenchmark.report(); + + expect(emitter.emitCount, equals(1)); + }); + }); +}