author	 = {Julian Kunkel and Eugen Betke},
	title	 = {An {MPI-IO} In-Memory Driver for Non-Volatile Pooled Memory of the {Kove XPD}},
	year	 = {2017},
	booktitle	 = {{High Performance Computing: ISC High Performance 2017 International Workshops, DRBSD, ExaComm, HCPM, HPC-IODC, IWOPH, IXPUG, P\^{}3MA, VHPC, Visualization at Scale, WOPSSS}},
	editor	 = {Julian Kunkel and Rio Yokota and Michaela Taufer and John Shalf},
	publisher	 = {Springer},
	series	 = {Lecture Notes in Computer Science},
	volume	 = {10524},
	pages	 = {644--655},
	conference	 = {ISC High Performance},
	location	 = {Frankfurt, Germany},
	isbn	 = {978-3-319-67629-6},
	abstract	 = {Many scientific applications are limited by the performance offered by parallel file systems. SSD based burst buffers provide significantly better performance than HDD backed storage but at the expense of capacity. Clearly, achieving wire-speed of the interconnect and predictable low latency I/O is the holy grail of storage. In-memory storage promises to provide optimal performance exceeding SSD based solutions. Kove{\textregistered}'s XPD{\textregistered} offers pooled memory for cluster systems. This remote memory is asynchronously backed up to storage devices of the XPDs and considered to be non-volatile. Albeit the system offers various APIs to access this memory such as treating it as a block device, it does not allow to expose it as file system that offers POSIX or MPI-IO semantics. In this paper, we 1) describe the XPD-MPIIO-driver which supports the scale-out architecture of the XPDs. This MPI-agnostic driver enables high-level libraries to utilize the XPD's memory as storage. 2) A thorough performance evaluation of the XPD is conducted. This includes scale-out testing of the infrastructure and metadata operations but also performance variability. We show that the driver and storage architecture is able to nearly saturate wire-speed of InfiniBand (60+ GiB/s with 14 FDR links) while providing low latency and little performance variability.},