# clat_hist files are indexed sequentially by inner job. If you have a job
# file with 2 jobs, each with numjobs=4, you will have 8 clat_hist files.
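# For example, a job file with jobs "seq_read" and "rand_write", each with
# numjobs=4, yields <log_file_base>_clat_hist.1.log through .8.log, where
# files 1-4 belong to seq_read and 5-8 to rand_write.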
clat_hist_idx = 0
for job in fio_json_result["jobs"]:
job_name = job["jobname"]
parameters = parameter_metadata[job_name]
parameters["fio_job"] = job_name
if base_metadata:
parameters.update(base_metadata)
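  # The merged job parameters and run-level metadata are attached to every
  # sample emitted for this job below.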
for mode in io_modes:
if job[mode]["io_bytes"]:
metric_name = "%s:%s" % (job_name, mode)
bw_metadata = {
"bw_min": job[mode]["bw_min"],
"bw_max": job[mode]["bw_max"],
"bw_dev": job[mode]["bw_dev"],
"bw_agg": job[mode]["bw_agg"],
"bw_mean": job[mode]["bw_mean"]}
bw_metadata.update(parameters)
      samples.append(
          sample.Sample("%s:bandwidth" % metric_name,
                        job[mode]["bw"],
                        "KB/s", bw_metadata, timestamp))
      # There is one sample whose metric is "<metric_name>:latency"
      # with all of the latency statistics in its metadata, and then
      # a bunch of samples whose metrics are
      # "<metric_name>:latency:min" through
      # "<metric_name>:latency:p99.99" that hold the individual
      # latency numbers as values. This is for historical reasons.
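      # For example, a job "seq_read" in mode "read" yields one sample named
      # "seq_read:read:latency" whose metadata holds min through p99.99, plus
      # individual samples such as "seq_read:read:latency:p50".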
clat_section = job[mode]["clat"]
percentiles = clat_section["percentile"]
lat_statistics = [
("min", clat_section["min"]),
("max", clat_section["max"]),
("mean", clat_section["mean"]),
("stddev", clat_section["stddev"]),
("p1", percentiles["1.000000"]),
("p5", percentiles["5.000000"]),
("p10", percentiles["10.000000"]),
("p20", percentiles["20.000000"]),
("p30", percentiles["30.000000"]),
("p40", percentiles["40.000000"]),
("p50", percentiles["50.000000"]),
("p60", percentiles["60.000000"]),
("p70", percentiles["70.000000"]),
("p80", percentiles["80.000000"]),
("p90", percentiles["90.000000"]),
("p95", percentiles["95.000000"]),
("p99", percentiles["99.000000"]),
("p99.5", percentiles["99.500000"]),
("p99.9", percentiles["99.900000"]),
("p99.95", percentiles["99.950000"]),
("p99.99", percentiles["99.990000"])]
      lat_metadata = parameters.copy()
      lat_metadata.update(lat_statistics)
      samples.append(
          sample.Sample("%s:latency" % metric_name,
                        clat_section["mean"],
                        "usec", lat_metadata, timestamp))
for stat_name, stat_val in lat_statistics:
samples.append(
sample.Sample("%s:latency:%s" % (metric_name, stat_name),
stat_val, "usec", parameters, timestamp))
samples.append(
sample.Sample("%s:iops" % metric_name,
job[mode]["iops"], "", parameters, timestamp))
if log_file_base and bin_vals:
    # Parse and aggregate the per-worker histogram logs for this job.
aggregates = collections.defaultdict(collections.Counter)
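    # aggregates maps a histogram key to a Counter of {latency_bin: count},
    # summed across this job's numjobs worker logs.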
    for _ in range(int(job["job options"]["numjobs"])):
clat_hist_idx += 1
      hist_file_path = vm_util.PrependTempDir(
          "%s_clat_hist.%d.log" % (log_file_base, clat_hist_idx))
hists = _ParseHistogram(hist_file_path, bin_vals[clat_hist_idx - 1])
for key in hists:
aggregates[key].update(hists[key])
samples += _BuildHistogramSamples(aggregates, job_name, parameters)
return samples
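
# A minimal sketch of what _ParseHistogram might look like, assuming fio's
# histogram log format of comma-separated lines
# "msec, data direction, block size, bin_0, bin_1, ...". The key layout
# (keyed by data direction) and the field positions here are assumptions
# for illustration, not the canonical implementation.
def _ParseHistogram(hist_file_path, bin_vals):
  """Parses one clat_hist log into a {key: Counter({bin: count})} dict."""
  hists = collections.defaultdict(collections.Counter)
  with open(hist_file_path) as hist_file:
    for line in hist_file:
      if not line.strip():
        continue
      fields = line.split(",")
      direction = int(fields[1])  # assumed: 0=read, 1=write, 2=trim
      counts = (int(field) for field in fields[3:])
      for bin_val, count in zip(bin_vals, counts):
        if count:
          hists[direction][bin_val] += count
  return hists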