// Copyright 2014 The Flutter Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

import 'dart:async';
import 'dart:convert' show json;
import 'dart:html' as html;

import 'package:macrobenchmarks/src/web/bench_text_layout.dart';
import 'package:macrobenchmarks/src/web/bench_text_out_of_picture_bounds.dart';

import 'src/web/bench_build_material_checkbox.dart';
import 'src/web/bench_card_infinite_scroll.dart';
import 'src/web/bench_draw_rect.dart';
import 'src/web/bench_dynamic_clip_on_static_picture.dart';
import 'src/web/bench_simple_lazy_text_scroll.dart';
import 'src/web/bench_text_out_of_picture_bounds.dart';
import 'src/web/recorder.dart';

typedef RecorderFactory = Recorder Function();
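
/// Whether this app was compiled with the CanvasKit (Skia) web renderer.
///
/// `FLUTTER_WEB_USE_SKIA` is a compile-time environment declaration, so this
/// value is constant and the `if (isCanvasKit)` entries in [benchmarks] are
/// resolved when the app is built.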
const bool isCanvasKit = bool.fromEnvironment('FLUTTER_WEB_USE_SKIA', defaultValue: false);

/// List of all benchmarks that run in the devicelab.
///
/// When adding a new benchmark, add it to this map. Make sure that the name
/// of your benchmark is unique.
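///
/// For example, a new (hypothetical) `BenchFoo` recorder would be registered
/// as:
///
/// ```dart
/// BenchFoo.benchmarkName: () => BenchFoo(),
/// ```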
final Map<String, RecorderFactory> benchmarks = <String, RecorderFactory>{
  BenchCardInfiniteScroll.benchmarkName: () => BenchCardInfiniteScroll(),
  BenchDrawRect.benchmarkName: () => BenchDrawRect(),
  BenchTextOutOfPictureBounds.benchmarkName: () => BenchTextOutOfPictureBounds(),
  BenchSimpleLazyTextScroll.benchmarkName: () => BenchSimpleLazyTextScroll(),
  BenchBuildMaterialCheckbox.benchmarkName: () => BenchBuildMaterialCheckbox(),
  BenchDynamicClipOnStaticPicture.benchmarkName: () => BenchDynamicClipOnStaticPicture(),
  if (isCanvasKit)
    BenchBuildColorsGrid.canvasKitBenchmarkName: () => BenchBuildColorsGrid.canvasKit(),

  // Benchmarks that we don't want to run using CanvasKit.
  if (!isCanvasKit) ...<String, RecorderFactory>{
    BenchTextLayout.domBenchmarkName: () => BenchTextLayout(useCanvas: false),
    BenchTextLayout.canvasBenchmarkName: () => BenchTextLayout(useCanvas: true),
    BenchTextCachedLayout.domBenchmarkName: () => BenchTextCachedLayout(useCanvas: false),
    BenchTextCachedLayout.canvasBenchmarkName: () => BenchTextCachedLayout(useCanvas: true),
    BenchBuildColorsGrid.domBenchmarkName: () => BenchBuildColorsGrid.dom(),
    BenchBuildColorsGrid.canvasBenchmarkName: () => BenchBuildColorsGrid.canvas(),
  }
};

final LocalBenchmarkServerClient _client = LocalBenchmarkServerClient();

Future<void> main() async {
  // Check if the benchmark server wants us to run a specific benchmark.
  final String nextBenchmark = await _client.requestNextBenchmark();

  if (nextBenchmark == LocalBenchmarkServerClient.kManualFallback) {
    _fallbackToManual('The server did not tell us which benchmark to run next.');
    return;
  }

  await _runBenchmark(nextBenchmark);
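  // Reload the page so the next benchmark starts from a clean page and
  // engine state.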
  html.window.location.reload();
}

Future<void> _runBenchmark(String benchmarkName) async {
  final RecorderFactory recorderFactory = benchmarks[benchmarkName];

  if (recorderFactory == null) {
    _fallbackToManual('Benchmark $benchmarkName not found.');
    return;
  }

  try {
    final Recorder recorder = recorderFactory();

    // Performance tracing requires the local benchmark server (the tracer is
    // driven from outside the page via the DevTools Protocol), so it is only
    // hooked up when not in manual mode.
    final Runner runner = recorder.isTracingEnabled && !_client.isInManualMode
      ? Runner(
          recorder: recorder,
          setUpAllDidRun: () => _client.startPerformanceTracing(benchmarkName),
          tearDownAllWillRun: _client.stopPerformanceTracing,
        )
      : Runner(recorder: recorder);

    final Profile profile = await runner.run();
    if (!_client.isInManualMode) {
      await _client.sendProfileData(profile);
    } else {
      print(profile);
    }
  } catch (error, stackTrace) {
    if (_client.isInManualMode) {
      rethrow;
    }
    await _client.reportError(error, stackTrace);
  }
}

void _fallbackToManual(String error) {
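  // appendHtml sanitizes markup through a validator; the one passed below
  // explicitly allows standard HTML5 elements and the inline style used for
  // positioning.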
  html.document.body.appendHtml('''
    <div id="manual-panel">
      <h3>$error</h3>

      <p>Choose one of the following benchmarks:</p>

      <!-- Absolutely position it so it receives the clicks and not the glasspane -->
      <ul style="position: absolute">
        ${
          benchmarks.keys
            .map((String name) => '<li><button id="$name">$name</button></li>')
            .join('\n')
        }
      </ul>
    </div>
  ''', validator: html.NodeValidatorBuilder()..allowHtml5()..allowInlineStyles());

  for (final String benchmarkName in benchmarks.keys) {
    final html.Element button = html.document.querySelector('#$benchmarkName');
    button.addEventListener('click', (_) {
      final html.Element manualPanel = html.document.querySelector('#manual-panel');
      manualPanel?.remove();
      _runBenchmark(benchmarkName);
    });
  }
}

/// Implements the client REST API for the local benchmark server.
///
/// The local server is optional. If it is not available, the benchmark UI
/// must implement a manual fallback. This allows debugging benchmarks using
/// plain `flutter run`.
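///
/// A minimal sketch of the automated flow (this mirrors what [main] and
/// [_runBenchmark] do above):
///
/// ```dart
/// final LocalBenchmarkServerClient client = LocalBenchmarkServerClient();
/// final String name = await client.requestNextBenchmark();
/// if (name != LocalBenchmarkServerClient.kManualFallback) {
///   final Profile profile = await Runner(recorder: benchmarks[name]()).run();
///   await client.sendProfileData(profile);
/// }
/// ```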
class LocalBenchmarkServerClient {
  /// The value returned by [requestNextBenchmark] when the local server is
  /// not available and the runner should fall back to manual mode.
  static const String kManualFallback = '__manual_fallback__';

  /// Whether we fell back to manual mode.
  ///
  /// This happens when you run benchmarks using plain `flutter run` rather
  /// than the devicelab test harness. The test harness spins up a special
  /// server that provides an API for automatically picking the next benchmark
  /// to run.
  bool isInManualMode;

  /// Asks the local server for the name of the next benchmark to run.
  ///
  /// Returns [kManualFallback] if the local server is not available (a 404
  /// response is used as the signal).
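  ///
  /// The request body is the JSON-encoded list of all benchmark names known
  /// to this client; the response body, returned verbatim, is the name of the
  /// benchmark to run next.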
Future<String> requestNextBenchmark() async {
    final html.HttpRequest request = await _requestXhr(
      '/next-benchmark',
      method: 'POST',
      mimeType: 'application/json',
      sendData: json.encode(benchmarks.keys.toList()),
    );

    // A 404 is expected in the following cases:
    // - The benchmark is run using plain `flutter run`, which does not provide
    //   a "next-benchmark" handler.
    // - We ran all benchmarks and the server is telling us there are no more
    //   benchmarks to run.
    if (request.status == 404) {
      isInManualMode = true;
      return kManualFallback;
    }

    isInManualMode = false;
    return request.responseText;
  }

  void _checkNotManualMode() {
    if (isInManualMode) {
      throw StateError('Operation not supported in manual fallback mode.');
    }
  }

  /// Asks the local server to begin tracing performance.
  ///
  /// This uses the chrome://tracing tracer, which is not available from within
  /// the page itself, and therefore must be controlled from outside using the
  /// DevTools Protocol.
  Future<void> startPerformanceTracing(String benchmarkName) async {
    _checkNotManualMode();
    await html.HttpRequest.request(
      '/start-performance-tracing?label=$benchmarkName',
      method: 'POST',
      mimeType: 'application/json',
    );
  }

  /// Stops the performance tracing session started by [startPerformanceTracing].
  Future<void> stopPerformanceTracing() async {
    _checkNotManualMode();
    await html.HttpRequest.request(
      '/stop-performance-tracing',
      method: 'POST',
      mimeType: 'application/json',
    );
  }

  /// Sends the profile data collected by the benchmark to the local benchmark
  /// server.
  Future<void> sendProfileData(Profile profile) async {
    _checkNotManualMode();
    final html.HttpRequest request = await html.HttpRequest.request(
      '/profile-data',
      method: 'POST',
      mimeType: 'application/json',
      sendData: json.encode(profile.toJson()),
    );
    if (request.status != 200) {
      throw Exception(
        'Failed to report profile data to benchmark server. '
        'The server responded with status code ${request.status}.'
      );
    }
  }

  /// Reports an error to the benchmark server.
  ///
  /// The server will halt the devicelab task and log the error.
  Future<void> reportError(dynamic error, StackTrace stackTrace) async {
    _checkNotManualMode();
    await html.HttpRequest.request(
      '/on-error',
      method: 'POST',
      mimeType: 'application/json',
      sendData: json.encode(<String, dynamic>{
        'error': '$error',
        'stackTrace': '$stackTrace',
      }),
    );
  }

  /// This is the same as calling [html.HttpRequest.request], but it does not
  /// throw on a 404 response, which we use to detect `flutter run`.
  Future<html.HttpRequest> _requestXhr(
    String url, {
    String method,
    bool withCredentials,
    String responseType,
    String mimeType,
    Map<String, String> requestHeaders,
    dynamic sendData,
  }) {
    final Completer<html.HttpRequest> completer = Completer<html.HttpRequest>();
    final html.HttpRequest xhr = html.HttpRequest();

    method ??= 'GET';
    xhr.open(method, url, async: true);

    if (withCredentials != null) {
      xhr.withCredentials = withCredentials;
    }

    if (responseType != null) {
      xhr.responseType = responseType;
    }

    if (mimeType != null) {
      xhr.overrideMimeType(mimeType);
    }

    if (requestHeaders != null) {
      requestHeaders.forEach((String header, String value) {
        xhr.setRequestHeader(header, value);
      });
    }

    xhr.onLoad.listen((html.ProgressEvent e) {
      completer.complete(xhr);
    });

    xhr.onError.listen(completer.completeError);

    if (sendData != null) {
      xhr.send(sendData);
    } else {
      xhr.send();
    }

    return completer.future;
  }
}