author	 = {Yuichi Tsujita and Julian Kunkel and Stephan Krempel and Thomas Ludwig},
	title	 = {{Tracing Performance of MPI-I/O with PVFS2: A Case Study of Optimization}},
	year	 = {2010},
	booktitle	 = {{Parallel Computing: From Multicores and GPU's to Petascale}},
	publisher	 = {IOS Press},
	pages	 = {379--386},
	conference	 = {PARCO 2009},
	isbn	 = {978-1-60750-530-3},
	abstract	 = {Parallel computing manages huge amounts of data due to a dramatic increase in computing scale. The parallel file system PVFS version 2 (PVFS2) realizes a scalable file system for such huge data on a cluster system. Although several MPI tracing tools can check the behavior of MPI functions, tracing PVFS server activities has not been available. Hence, we have missed chances to optimize MPI applications regarding PVFS server activities although effective usage of limited resources is important even in PVFS servers. An off-line performance analysis tool named PIOviz traces both MPI-I/O calls and associated PVFS server activities to assist optimization for MPI applications. Besides, tracing statistical values of PVFS servers such as CPU usage and PVFS internal statistics assists optimization of MPI applications. In this paper, we demonstrate two performance evaluation tests of the HPIO benchmark, and carry out off-line analysis by using PIOviz. The evaluation shows effectiveness of PIOviz in detecting bottlenecks of MPI-I/O.},