// This module implements the deprecated `Benchmark` and `ParameterizedBenchmark`
// builders; allow the deprecation warnings their own impls would otherwise raise.
#![allow(deprecated)]
use crate::analysis;
use crate::connection::OutgoingMessage;
use crate::measurement::{Measurement, WallTime};
use crate::report::{BenchmarkId, Report, ReportContext};
use crate::routine::{Function, Routine};
use crate::{Bencher, Criterion, DurationExt, Mode, PlotConfiguration, SamplingMode, Throughput};
use std::cell::RefCell;
use std::fmt::Debug;
use std::marker::Sized;
use std::time::Duration;
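/// Fully-resolved configuration for a single benchmark run.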
pub struct BenchmarkConfig {
pub confidence_level: f64,
pub measurement_time: Duration,
pub noise_threshold: f64,
pub nresamples: usize,
pub sample_size: usize,
pub significance_level: f64,
pub warm_up_time: Duration,
pub sampling_mode: SamplingMode,
}
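/// Per-benchmark configuration overrides; any field left as `None` falls back
/// to the corresponding value in the global `BenchmarkConfig`.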
#[derive(Clone)]
pub(crate) struct PartialBenchmarkConfig {
pub(crate) confidence_level: Option<f64>,
pub(crate) measurement_time: Option<Duration>,
pub(crate) noise_threshold: Option<f64>,
pub(crate) nresamples: Option<usize>,
pub(crate) sample_size: Option<usize>,
pub(crate) significance_level: Option<f64>,
pub(crate) warm_up_time: Option<Duration>,
pub(crate) sampling_mode: Option<SamplingMode>,
pub(crate) plot_config: PlotConfiguration,
}
impl Default for PartialBenchmarkConfig {
fn default() -> Self {
PartialBenchmarkConfig {
confidence_level: None,
measurement_time: None,
noise_threshold: None,
nresamples: None,
sample_size: None,
significance_level: None,
warm_up_time: None,
plot_config: PlotConfiguration::default(),
sampling_mode: None,
}
}
}
impl PartialBenchmarkConfig {
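/// Merges these overrides with `defaults` into a fully-resolved
/// `BenchmarkConfig`.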
pub(crate) fn to_complete(&self, defaults: &BenchmarkConfig) -> BenchmarkConfig {
BenchmarkConfig {
confidence_level: self.confidence_level.unwrap_or(defaults.confidence_level),
measurement_time: self.measurement_time.unwrap_or(defaults.measurement_time),
noise_threshold: self.noise_threshold.unwrap_or(defaults.noise_threshold),
nresamples: self.nresamples.unwrap_or(defaults.nresamples),
sample_size: self.sample_size.unwrap_or(defaults.sample_size),
significance_level: self
.significance_level
.unwrap_or(defaults.significance_level),
warm_up_time: self.warm_up_time.unwrap_or(defaults.warm_up_time),
sampling_mode: self.sampling_mode.unwrap_or(defaults.sampling_mode),
}
}
}
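/// A benchmark routine paired with the name it is reported under.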
pub(crate) struct NamedRoutine<T, M: Measurement = WallTime> {
pub id: String,
pub(crate) f: Box<RefCell<dyn Routine<M, T>>>,
}
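/// Deprecated builder describing one or more routines that are each run over a
/// set of input values of type `T`.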
#[doc(hidden)]
#[deprecated(since = "0.3.4", note = "Please use BenchmarkGroups instead.")]
pub struct ParameterizedBenchmark<T: Debug, M: Measurement = WallTime> {
config: PartialBenchmarkConfig,
values: Vec<T>,
routines: Vec<NamedRoutine<T, M>>,
throughput: Option<Box<dyn Fn(&T) -> Throughput>>,
}
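/// Deprecated builder describing one or more routines that take no input
/// parameters.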
#[doc(hidden)]
#[deprecated(since = "0.3.4", note = "Please use BenchmarkGroups instead.")]
pub struct Benchmark<M: Measurement = WallTime> {
config: PartialBenchmarkConfig,
routines: Vec<NamedRoutine<(), M>>,
throughput: Option<Throughput>,
}
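/// Common interface through which `Criterion` runs either kind of deprecated
/// benchmark builder.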
#[doc(hidden)]
pub trait BenchmarkDefinition<M: Measurement = WallTime>: Sized {
#[doc(hidden)]
fn run(self, group_id: &str, c: &mut Criterion<M>);
}
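// Generates the configuration builder methods shared by `Benchmark` and
// `ParameterizedBenchmark`. The `$type` argument is currently unused.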
macro_rules! benchmark_config {
($type:tt) => {
pub fn sample_size(mut self, n: usize) -> Self {
assert!(n >= 10);
self.config.sample_size = Some(n);
self
}
pub fn warm_up_time(mut self, dur: Duration) -> Self {
assert!(dur.to_nanos() > 0);
self.config.warm_up_time = Some(dur);
self
}
pub fn measurement_time(mut self, dur: Duration) -> Self {
assert!(dur.to_nanos() > 0);
self.config.measurement_time = Some(dur);
self
}
pub fn nresamples(mut self, n: usize) -> Self {
assert!(n > 0);
if n <= 1000 {
println!("\nWarning: It is not recommended to reduce nresamples below 1000.");
}
self.config.nresamples = Some(n);
self
}
pub fn noise_threshold(mut self, threshold: f64) -> Self {
assert!(threshold >= 0.0);
self.config.noise_threshold = Some(threshold);
self
}
pub fn confidence_level(mut self, cl: f64) -> Self {
assert!(cl > 0.0 && cl < 1.0);
if cl < 0.5 {
println!("\nWarning: It is not recommended to reduce confidence level below 0.5.");
}
self.config.confidence_level = Some(cl);
self
}
pub fn significance_level(mut self, sl: f64) -> Self {
assert!(sl > 0.0 && sl < 1.0);
self.config.significance_level = Some(sl);
self
}
pub fn plot_config(mut self, new_config: PlotConfiguration) -> Self {
self.config.plot_config = new_config;
self
}
pub fn sampling_mode(mut self, new_mode: SamplingMode) -> Self {
self.config.sampling_mode = Some(new_mode);
self
}
};
}
impl<M> Benchmark<M>
where
M: Measurement + 'static,
{
benchmark_config!(Benchmark);
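/// Constructs a `Benchmark` containing a single routine named `id`.
///
/// A minimal sketch of the builder-style usage, assuming the (equally
/// deprecated) `Criterion::bench` entry point and a caller-defined
/// `fibonacci` helper:
///
/// ```no_run
/// use criterion::{Benchmark, Criterion};
///
/// fn fibonacci(n: u64) -> u64 {
///     if n < 2 {
///         n
///     } else {
///         fibonacci(n - 1) + fibonacci(n - 2)
///     }
/// }
///
/// let mut criterion = Criterion::default();
/// criterion.bench(
///     "fib",
///     Benchmark::new("fib 20", |b| b.iter(|| fibonacci(20))).sample_size(50),
/// );
/// ```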
pub fn new<S, F>(id: S, f: F) -> Benchmark<M>
where
S: Into<String>,
F: FnMut(&mut Bencher<'_, M>) + 'static,
{
Benchmark {
config: PartialBenchmarkConfig::default(),
routines: vec![],
throughput: None,
}
.with_function(id, f)
}
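/// Adds another routine, named `id`, to this benchmark group.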
pub fn with_function<S, F>(mut self, id: S, mut f: F) -> Benchmark<M>
where
S: Into<String>,
F: FnMut(&mut Bencher<'_, M>) + 'static,
{
let routine = NamedRoutine {
id: id.into(),
f: Box::new(RefCell::new(Function::new(move |b, _| f(b)))),
};
self.routines.push(routine);
self
}
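/// Sets the throughput (bytes or elements processed per iteration) reported
/// for every routine in this group.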
pub fn throughput(mut self, throughput: Throughput) -> Benchmark<M> {
self.throughput = Some(throughput);
self
}
}
impl<M: Measurement> BenchmarkDefinition<M> for Benchmark<M> {
fn run(self, group_id: &str, c: &mut Criterion<M>) {
let report_context = ReportContext {
output_directory: c.output_directory.clone(),
plot_config: self.config.plot_config.clone(),
};
let config = self.config.to_complete(&c.config);
let num_routines = self.routines.len();
let mut all_ids = vec![];
let mut any_matched = false;
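// When attached to an external runner (e.g. cargo-criterion), announce the
// start of this benchmark group over the connection.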
if let Some(conn) = &c.connection {
conn.send(&OutgoingMessage::BeginningBenchmarkGroup { group: group_id })
.unwrap();
}
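// Run each routine under a unique BenchmarkId; the function name is omitted
// from the ID when the group contains a single routine that shares the
// group's name.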
for routine in self.routines {
let function_id = if num_routines == 1 && group_id == routine.id {
None
} else {
Some(routine.id)
};
let mut id = BenchmarkId::new(
group_id.to_owned(),
function_id,
None,
self.throughput.clone(),
);
id.ensure_directory_name_unique(&c.all_directories);
c.all_directories.insert(id.as_directory_name().to_owned());
id.ensure_title_unique(&c.all_titles);
c.all_titles.insert(id.as_title().to_owned());
let do_run = c.filter_matches(id.id());
any_matched |= do_run;
execute_benchmark(
do_run,
&id,
c,
&config,
&mut *routine.f.borrow_mut(),
&report_context,
&(),
self.throughput.clone(),
);
all_ids.push(id);
}
if let Some(conn) = &c.connection {
conn.send(&OutgoingMessage::FinishedBenchmarkGroup { group: group_id })
.unwrap();
conn.serve_value_formatter(c.measurement.formatter())
.unwrap();
}
if all_ids.len() > 1 && any_matched && c.mode.is_benchmark() {
c.report
.summarize(&report_context, &all_ids, c.measurement.formatter());
}
if any_matched {
c.report.group_separator();
}
}
}
impl<T, M> ParameterizedBenchmark<T, M>
where
T: Debug + 'static,
M: Measurement + 'static,
{
benchmark_config!(ParameterizedBenchmark);
pub(crate) fn with_functions(
functions: Vec<NamedRoutine<T, M>>,
parameters: Vec<T>,
) -> ParameterizedBenchmark<T, M> {
ParameterizedBenchmark {
config: PartialBenchmarkConfig::default(),
values: parameters,
routines: functions,
throughput: None,
}
}
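/// Constructs a `ParameterizedBenchmark` containing a single routine named
/// `id`, which is run once for each of the given input values; e.g.
/// `ParameterizedBenchmark::new("fib", |b, n| b.iter(|| fibonacci(*n)), vec![5u64, 10, 20])`
/// (with `fibonacci` standing in for the caller's own function).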
pub fn new<S, F, I>(id: S, f: F, parameters: I) -> ParameterizedBenchmark<T, M>
where
S: Into<String>,
F: FnMut(&mut Bencher<'_, M>, &T) + 'static,
I: IntoIterator<Item = T>,
{
ParameterizedBenchmark {
config: PartialBenchmarkConfig::default(),
values: parameters.into_iter().collect(),
routines: vec![],
throughput: None,
}
.with_function(id, f)
}
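/// Adds another routine, named `id`, which is also run once per input value.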
pub fn with_function<S, F>(mut self, id: S, f: F) -> ParameterizedBenchmark<T, M>
where
S: Into<String>,
F: FnMut(&mut Bencher<'_, M>, &T) + 'static,
{
let routine = NamedRoutine {
id: id.into(),
f: Box::new(RefCell::new(Function::new(f))),
};
self.routines.push(routine);
self
}
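/// Sets a function used to compute the throughput for each input value.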
pub fn throughput<F>(mut self, throughput: F) -> ParameterizedBenchmark<T, M>
where
F: Fn(&T) -> Throughput + 'static,
{
self.throughput = Some(Box::new(throughput));
self
}
}
impl<T, M> BenchmarkDefinition<M> for ParameterizedBenchmark<T, M>
where
T: Debug + 'static,
M: Measurement + 'static,
{
fn run(self, group_id: &str, c: &mut Criterion<M>) {
let report_context = ReportContext {
output_directory: c.output_directory.clone(),
plot_config: self.config.plot_config.clone(),
};
let config = self.config.to_complete(&c.config);
let num_parameters = self.values.len();
let num_routines = self.routines.len();
let mut all_ids = vec![];
let mut any_matched = false;
if let Some(conn) = &c.connection {
conn.send(&OutgoingMessage::BeginningBenchmarkGroup { group: group_id })
.unwrap();
}
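// Run every (routine, input value) pair under its own BenchmarkId. The
// function name is omitted when the single routine shares the group's name,
// and the value string is omitted when there is only one input value.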
for routine in self.routines {
for value in &self.values {
let function_id = if num_routines == 1 && group_id == routine.id {
None
} else {
Some(routine.id.clone())
};
let value_str = if num_parameters == 1 {
None
} else {
Some(format!("{:?}", value))
};
let throughput = self.throughput.as_ref().map(|func| func(value));
let mut id = BenchmarkId::new(
group_id.to_owned(),
function_id,
value_str,
throughput.clone(),
);
id.ensure_directory_name_unique(&c.all_directories);
c.all_directories.insert(id.as_directory_name().to_owned());
id.ensure_title_unique(&c.all_titles);
c.all_titles.insert(id.as_title().to_owned());
let do_run = c.filter_matches(id.id());
any_matched |= do_run;
execute_benchmark(
do_run,
&id,
c,
&config,
&mut *routine.f.borrow_mut(),
&report_context,
value,
throughput,
);
all_ids.push(id);
}
}
if let Some(conn) = &c.connection {
conn.send(&OutgoingMessage::FinishedBenchmarkGroup { group: group_id })
.unwrap();
conn.serve_value_formatter(c.measurement.formatter())
.unwrap();
}
if all_ids.len() > 1 && any_matched && c.mode.is_benchmark() {
c.report
.summarize(&report_context, &all_ids, c.measurement.formatter());
}
if any_matched {
c.report.group_separator();
}
}
}
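// Runs (or skips) a single benchmark according to the current Criterion mode:
// full measurement and analysis, listing, a one-shot test run, or profiling.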
#[cfg_attr(feature = "cargo-clippy", allow(clippy::too_many_arguments))]
fn execute_benchmark<T, M>(
do_run: bool,
id: &BenchmarkId,
c: &Criterion<M>,
config: &BenchmarkConfig,
routine: &mut dyn Routine<M, T>,
report_context: &ReportContext,
parameter: &T,
throughput: Option<Throughput>,
) where
T: Debug,
M: Measurement,
{
match c.mode {
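// Normal measurement: tell any attached runner whether this benchmark will
// run or be skipped, then hand off to the analysis pipeline.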
Mode::Benchmark => {
if let Some(conn) = &c.connection {
if do_run {
conn.send(&OutgoingMessage::BeginningBenchmark { id: id.into() })
.unwrap();
} else {
conn.send(&OutgoingMessage::SkippingBenchmark { id: id.into() })
.unwrap();
}
}
if do_run {
analysis::common(
id,
routine,
config,
c,
report_context,
parameter,
throughput,
);
}
}
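// List mode (`--list`): only print the benchmark's name.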
Mode::List => {
if do_run {
println!("{}: bench", id);
}
}
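// Test mode: execute the routine once to verify that it runs without
// panicking.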
Mode::Test => {
if do_run {
c.report.test_start(id, report_context);
routine.test(&c.measurement, parameter);
c.report.test_pass(id, report_context);
}
}
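// Profile mode: run the routine for the requested duration so an external
// profiler can attach; no statistical analysis is performed.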
Mode::Profile(duration) => {
if do_run {
routine.profile(&c.measurement, id, c, report_context, duration, parameter);
}
}
}
}