BibTeX

@inproceedings{UMLWMKM18,
	author	 = {Kunkel, Julian and Markomanolis, George S.},
	title	 = {Understanding Metadata Latency with {MDWorkbench}},
	booktitle	 = {High Performance Computing -- {ISC} High Performance 2018 International Workshops},
	year	 = {2018},
	editor	 = {Yokota, Rio and Weiland, Michele and Keyes, David and Trinitis, Carsten},
	publisher	 = {Springer},
	series	 = {Lecture Notes in Computer Science},
	volume	 = {11203},
	pages	 = {75--88},
	doi	 = {10.1007/978-3-030-02465-9_5},
	note	 = {WOPSSS workshop, ISC HPC},
	organization	 = {ISC Team},
	location	 = {Frankfurt, Germany},
	abstract	 = {While parallel file systems often satisfy the need of applications with bulk synchronous I/O, they lack capabilities of dealing with metadata intense workloads. Typically, in procurements, the focus lies on the aggregated metadata throughput using the MDTest benchmark. However, metadata performance is crucial for interactive use. Metadata benchmarks involve even more parameters compared to I/O benchmarks. There are several aspects that are currently uncovered and, therefore, not in the focus of vendors to investigate. Particularly, response latency and interactive workloads operating on a working set of data. The lack of capabilities from file systems can be observed when looking at the IO-500 list, where metadata performance between best and worst system does not differ significantly. In this paper, we introduce a new benchmark called MDWorkbench which generates a reproducible workload emulating many concurrent users or -- in an alternative view -- queuing systems. This benchmark provides a detailed latency profile, overcomes caching issues, and provides a method to assess the quality of the observed throughput. We evaluate the benchmark on state-of-the-art parallel file systems with GPFS (IBM Spectrum Scale), Lustre, Cray's Datawarp, and DDN IME, and conclude that we can reveal characteristics that could not be identified before.},
}