m5.12xlarge (48 vCPU, 192 GiB RAM), 4 x 3000 GiB gp2 volumes in RAID 0; baseline IOPS: 3000 GiB x 3 IOPS/GiB = 9000 per volume, x 4 volumes = 36000 for the array
Root device
/dev/xvda
Block devices
/dev/xvda
/dev/sdb
/dev/sdc
/dev/sdd
/dev/sde
Create a 4-volume stripe (RAID 0)
# # Use the lsblk command to view your available disk devices and their mount points
[ec2-user@ip-10-0-0-105 ~ ]$ sudo lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
nvme1n1 259:0 0 3T 0 disk
nvme2n1 259:1 0 3T 0 disk
nvme3n1 259:2 0 3T 0 disk
nvme4n1 259:3 0 3T 0 disk
nvme0n1 259:4 0 20G 0 disk
├─nvme0n1p1 259:5 0 20G 0 part /
└─nvme0n1p128 259:6 0 1M 0 part
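# # Note: on Nitro-based instances such as m5, the EBS volumes attached as /dev/sdb-/dev/sde are exposed
# # as NVMe devices (/dev/nvme1n1-/dev/nvme4n1). A hedged sketch to map them back to their console names,
# # assuming the ebsnvme-id helper that ships with Amazon Linux 2 (path and flag are assumptions, not part
# # of this session):
for d in /dev/nvme[1-4]n1; do echo -n "$d -> "; sudo /sbin/ebsnvme-id -b "$d"; done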
# # create a 4-volume stripe
[ec2-user@ip-10-0-0-105 ~ ]$ sudo mdadm --create /dev/md0 --level=0 --chunk=64 --raid-devices=4 /dev/nvme1n1 /dev/nvme2n1 /dev/nvme3n1 /dev/nvme4n1
mdadm: Defaulting to version 1.2 metadata
mdadm: array /dev/md0 started.
# # Check the status of the array (a RAID 0 stripe has no synchronization phase, so it is active immediately)
[ec2-user@ip-10-0-0-105 ~ ]$ sudo cat /proc/mdstat
Personalities : [raid0]
md0 : active raid0 nvme4n1[3] nvme3n1[2] nvme2n1[1] nvme1n1[0]
12582387712 blocks super 1.2 64k chunks
unused devices: <none>
[ec2-user@ip-10-0-0-105 ~ ]$ sudo mdadm --detail /dev/md0
/dev/md0:
Version : 1.2
Creation Time : Thu Aug 8 03:09:16 2019
Raid Level : raid0
Array Size : 12582387712 (11999.50 GiB 12884.37 GB)
Raid Devices : 4
Total Devices : 4
Persistence : Superblock is persistent
Update Time : Thu Aug 8 03:09:16 2019
State : clean
Active Devices : 4
Working Devices : 4
Failed Devices : 0
Spare Devices : 0
Chunk Size : 64K
Consistency Policy : none
Name : 0
UUID : d5e9aef4:3591aac9:d3b72848:d988e61e
Events : 0
Number Major Minor RaidDevice State
0 259 0 0 active sync /dev/sdd
1 259 1 1 active sync /dev/sde
2 259 2 2 active sync /dev/sdb
3 259 3 3 active sync /dev/sdc
[ec2-user@ip-10-0-0-105 ~ ]$ sudo lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
nvme1n1 259:0 0 3T 0 disk
└─md0 9:0 0 11.7T 0 raid0
nvme2n1 259:1 0 3T 0 disk
└─md0 9:0 0 11.7T 0 raid0
nvme3n1 259:2 0 3T 0 disk
└─md0 9:0 0 11.7T 0 raid0
nvme4n1 259:3 0 3T 0 disk
└─md0 9:0 0 11.7T 0 raid0
nvme0n1 259:4 0 20G 0 disk
├─nvme0n1p1 259:5 0 20G 0 part /
└─nvme0n1p128 259:6 0 1M 0 part
# # Create the mount point and use mkfs.xfs to create an XFS file system on the array
[ec2-user@ip-10-0-0-105 ~ ]$ sudo mkdir -p /mnt/p_iops_vol0 && sudo mkfs.xfs -L MY_RAID /dev/md0
meta-data=/dev/md0 isize=512 agcount=32, agsize=98299888 blks
= sectsz=512 attr=2, projid32bit=1
= crc=1 finobt=1, sparse=0
data = bsize=4096 blocks=3145596416, imaxpct=5
= sunit=16 swidth=64 blks
naming =version 2 bsize=4096 ascii-ci=0 ftype=1
log =internal log bsize=4096 blocks=521728, version=2
= sectsz=512 sunit=16 blks, lazy-count=1
realtime =none extsz=4096 blocks=0, rtextents=0
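# # Note: mkfs.xfs picked up the RAID geometry from /dev/md0 automatically: sunit=16 blocks (16 x 4096 B = 64 KiB, matching --chunk=64 above) and swidth=64 blocks (64 x 4096 B = 256 KiB, i.e. 4 data disks x 64 KiB per full stripe).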
# # To ensure that the RAID array is reassembled automatically on boot, create a configuration file that contains the RAID information:
[ec2-user@ip-10-0-0-105 ~ ]$ sudo mdadm --detail --scan | sudo tee -a /etc/mdadm.conf
ARRAY /dev/md0 metadata=1.2 name=0 UUID=d5e9aef4:3591aac9:d3b72848:d988e61e
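# # (Optional) A minimal sketch, not part of the original session, to confirm the array re-assembles from
# # /etc/mdadm.conf before relying on it at boot (only do this while /dev/md0 is not mounted):
sudo mdadm --stop /dev/md0
sudo mdadm --assemble --scan    # re-reads /etc/mdadm.conf and restarts /dev/md0
cat /proc/mdstat                # should show md0 active raid0 again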
# # Create a new ramdisk image to properly preload the block device modules for your new RAID configuration:
[ec2-user@ip-10-0-0-105 ~ ]$ sudo dracut -H -f /boot/initramfs-$(uname -r).img $(uname -r)
# # Mount the volume
[ec2-user@ip-10-0-0-105 ~ ]$ sudo mount -t xfs /dev/md0 /mnt/p_iops_vol0
[ec2-user@ip-10-0-0-105 ~ ]$ sudo chown ec2-user:ec2-user /mnt/p_iops_vol0/
[ec2-user@ip-10-0-0-105 ~ ]$ sudo lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
nvme1n1 259:0 0 3T 0 disk
└─md0 9:0 0 11.7T 0 raid0 /mnt/p_iops_vol0
nvme2n1 259:1 0 3T 0 disk
└─md0 9:0 0 11.7T 0 raid0 /mnt/p_iops_vol0
nvme3n1 259:2 0 3T 0 disk
└─md0 9:0 0 11.7T 0 raid0 /mnt/p_iops_vol0
nvme4n1 259:3 0 3T 0 disk
└─md0 9:0 0 11.7T 0 raid0 /mnt/p_iops_vol0
nvme0n1 259:4 0 20G 0 disk
├─nvme0n1p1 259:5 0 20G 0 part /
└─nvme0n1p128 259:6 0 1M 0 part
[ec2-user@ip-10-0-0-105 ~ ]$ df -h
Filesystem Size Used Avail Use% Mounted on
devtmpfs 93G 0 93G 0% /dev
tmpfs 93G 0 93G 0% /dev/shm
tmpfs 93G 648K 93G 1% /run
tmpfs 93G 0 93G 0% /sys/fs/cgroup
/dev/nvme0n1p1 20G 1.4G 19G 7% /
tmpfs 19G 0 19G 0% /run/user/0
tmpfs 19G 0 19G 0% /run/user/1000
/dev/md0 12T 12G 12T 1% /mnt/p_iops_vol0
# # Automatically Mount an Attached Volume After Reboot
# # Create a backup of your /etc/fstab file
sudo cp /etc/fstab /etc/fstab.orig
# # Add the following entry to /etc/fstab to mount the device at the specified mount point.
sudo vim /etc/fstab
LABEL=MY_RAID /mnt/p_iops_vol0 xfs defaults,nofail 0 2
# # To verify that your entry works, unmount the volume and remount all entries in /etc/fstab:
[ec2-user@ip-10-0-0-166 ~ ]$ sudo umount /mnt/p_iops_vol0
[ec2-user@ip-10-0-0-166 ~ ]$ sudo mount -a
[ec2-user@ip-10-0-0-166 ~ ]$ df -h
# # Install XFS file system support
[ec2-user@ip-10-0-0-105 ~ ]$ sudo yum install -y xfsprogs
Loaded plugins: extras_suggestions, langpacks, priorities, update-motd
amzn2-core | 2.4 kB 00:00:00
Package xfsprogs-4.5.0-18.amzn2.0.1.x86_64 already installed and latest version
Nothing to do
# # Install Benchmark Tools
[ec2-user ~ ]$ sudo yum install -y fio
# # Disable C-States
[ec2-user@ip-10-0-0-105 ~ ]$ sudo cpupower idle-info | grep "Number of idle states:"
Number of idle states: 4
[ec2-user@ip-10-0-0-105 ~ ]$ N=4
[ec2-user@ip-10-0-0-105 ~ ]$ for i in $(seq 1 $((N-1))); do sudo cpupower idle-set -d $i; done
[ec2-user@ip-10-0-0-105 ~ ]$ sudo cpupower idle-info | grep "Number of idle states:"
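# # A hedged way to confirm the deeper idle states are now disabled (sysfs path assumed to exist on this
# # kernel; a value of 1 means the state is disabled for that CPU):
grep . /sys/devices/system/cpu/cpu0/cpuidle/state*/disable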
# # Perform 16 KB random write operations (libaio, iodepth=32)
sudo fio --ioengine=libaio --iodepth=32 --refill_buffers --randrepeat=0 --directory=/mnt/p_iops_vol0 --name fio_test_file --direct=1 --rw=randwrite --bs=16k --size=1G --numjobs=16 --time_based --runtime=180 --group_reporting --norandommap
fio_test_file: (groupid=0, jobs=16): err= 0: pid=19824: Thu Aug 8 09:32:24 2019
write: io=101696MB, bw=578488KB/s, iops=36155, runt=180016msec
slat (usec): min=3, max=1699.5K, avg=424.97, stdev=2860.02
clat (usec): min=394, max=1718.9K, avg=13732.69, stdev=16516.64
lat (usec): min=398, max=1719.5K, avg=14157.66, stdev=17073.54
clat percentiles (usec):
| 1.00th=[ 482], 5.00th=[ 524], 10.00th=[ 556], 20.00th=[ 724],
| 30.00th=[ 3952], 40.00th=[10048], 50.00th=[12608], 60.00th=[14656],
| 70.00th=[17280], 80.00th=[24448], 90.00th=[29056], 95.00th=[36608],
| 99.00th=[45312], 99.50th=[52480], 99.90th=[61696], 99.95th=[69120],
| 99.99th=[602112]
lat (usec) : 500=2.34%, 750=18.06%, 1000=1.52%
lat (msec) : 2=2.34%, 4=5.98%, 10=9.67%, 20=34.47%, 50=24.93%
lat (msec) : 100=0.67%, 250=0.01%, 500=0.01%, 750=0.01%, 1000=0.01%
lat (msec) : 2000=0.01%
cpu : usr=0.92%, sys=1.29%, ctx=1682508, majf=0, minf=153
IO depths : 1=0.1%, 2=0.1%, 4=0.1%, 8=0.1%, 16=0.1%, 32=100.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.1%, 64=0.0%, >=64=0.0%
issued : total=r=0/w=6508565/d=0, short=r=0/w=0/d=0, drop=r=0/w=0/d=0
latency : target=0, window=0, percentile=100.00%, depth=32
Run status group 0 (all jobs):
WRITE: io=101696MB, aggrb=578487KB/s, minb=578487KB/s, maxb=578487KB/s, mint=180016msec, maxt=180016msec
Disk stats (read/write):
md0: ios=4/6508533, merge=0/0, ticks=0/0, in_queue=0, util=0.00%, aggrios=1/1629478, aggrmerge=0/0, aggrticks=1/6606020, aggrin_queue=6482532, aggrutil=93.28%
nvme3n1: ios=0/1629404, merge=0/0, ticks=0/2616580, in_queue=2493740, util=85.70%
nvme2n1: ios=0/1631037, merge=0/0, ticks=0/12053112, in_queue=11930020, util=93.28%
nvme1n1: ios=0/1628099, merge=0/1, ticks=0/1040612, in_queue=921312, util=85.11%
nvme4n1: ios=4/1629375, merge=0/0, ticks=4/10713776, in_queue=10585056, util=92.03%
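# # Note: 36,155 random-write IOPS is essentially the array's gp2 baseline of 36,000 IOPS computed above (4 volumes x 9,000 IOPS), so this run saturates the EBS IOPS limit.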
# # Without --ioengine=libaio --iodepth=32 (fio defaults to synchronous I/O at queue depth 1 per job)
sudo fio --directory=/mnt/p_iops_vol0 --name fio_test_file --direct=1 --rw=randwrite --bs=16k --size=1G --numjobs=16 --time_based --runtime=180 --group_reporting --norandommap
fio_test_file: (groupid=0, jobs=16): err= 0: pid=19927: Thu Aug 8 03:18:57 2019
write: io=74808MB, bw=425575KB/s, iops=26598, runt=180001msec
clat (usec): min=410, max=153938, avg=600.91, stdev=441.18
lat (usec): min=410, max=153939, avg=601.10, stdev=441.18
clat percentiles (usec):
| 1.00th=[ 474], 5.00th=[ 498], 10.00th=[ 510], 20.00th=[ 532],
| 30.00th=[ 548], 40.00th=[ 564], 50.00th=[ 580], 60.00th=[ 596],
| 70.00th=[ 620], 80.00th=[ 652], 90.00th=[ 700], 95.00th=[ 732],
| 99.00th=[ 868], 99.50th=[ 964], 99.90th=[ 3376], 99.95th=[ 4832],
| 99.99th=[ 6944]
lat (usec) : 500=5.99%, 750=90.04%, 1000=3.55%
lat (msec) : 2=0.26%, 4=0.08%, 10=0.07%, 20=0.01%, 50=0.01%
lat (msec) : 100=0.01%, 250=0.01%
cpu : usr=0.23%, sys=1.63%, ctx=4791196, majf=0, minf=147
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued : total=r=0/w=4787740/d=0, short=r=0/w=0/d=0, drop=r=0/w=0/d=0
latency : target=0, window=0, percentile=100.00%, depth=1
Run status group 0 (all jobs):
WRITE: io=74808MB, aggrb=425574KB/s, minb=425574KB/s, maxb=425574KB/s, mint=180001msec, maxt=180001msec
Disk stats (read/write):
md0: ios=4/4787074, merge=0/0, ticks=0/0, in_queue=0, util=0.00%, aggrios=1/1197966, aggrmerge=0/0, aggrticks=0/706577, aggrin_queue=604710, aggrutil=97.40%
nvme3n1: ios=0/1198290, merge=0/0, ticks=0/714336, in_queue=612744, util=97.39%
nvme2n1: ios=0/1199343, merge=0/0, ticks=0/692532, in_queue=590556, util=96.78%
nvme1n1: ios=0/1196794, merge=0/1, ticks=0/715212, in_queue=613016, util=97.40%
nvme4n1: ios=4/1197438, merge=0/0, ticks=0/704228, in_queue=602524, util=96.99%
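# # Note: without libaio, the 16 synchronous jobs keep at most 16 requests in flight. At an average completion latency of ~0.6 ms (clat avg 600.91 usec), that works out to roughly 16 / 0.0006 s ≈ 26,600 IOPS, matching the measured 26,598; this run is latency-bound rather than limited by the 36,000 IOPS ceiling.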
# # Perform 16 KB random read operations (libaio, iodepth=32)
sudo fio --ioengine=libaio --iodepth=32 --refill_buffers --randrepeat=0 --directory=/mnt/p_iops_vol0 --name fio_test_file --direct=1 --rw=randread --bs=16k --size=1G --numjobs=16 --time_based --runtime=180 --group_reporting --norandommap
fio_test_file: (groupid=0, jobs=16): err= 0: pid=20081: Thu Aug 8 09:40:14 2019
read : io=102101MB, bw=580793KB/s, iops=36299, runt=180015msec
slat (usec): min=2, max=38948, avg=424.41, stdev=1931.81
clat (usec): min=0, max=231588, avg=13679.77, stdev=13415.46
lat (usec): min=3, max=231591, avg=14104.18, stdev=14067.33
clat percentiles (usec):
| 1.00th=[ 189], 5.00th=[ 213], 10.00th=[ 233], 20.00th=[ 286],
| 30.00th=[ 1560], 40.00th=[ 7072], 50.00th=[12864], 60.00th=[14784],
| 70.00th=[17792], 80.00th=[25728], 90.00th=[31872], 95.00th=[40704],
| 99.00th=[52480], 99.50th=[55552], 99.90th=[65280], 99.95th=[69120],
| 99.99th=[76288]
lat (usec) : 2=0.08%, 10=0.04%, 20=0.02%, 50=0.03%, 100=0.02%
lat (usec) : 250=13.92%, 500=13.02%, 750=0.69%, 1000=0.47%
lat (msec) : 2=3.61%, 4=5.26%, 10=7.49%, 20=28.12%, 50=25.88%
lat (msec) : 100=1.35%, 250=0.01%
cpu : usr=0.35%, sys=1.17%, ctx=1716075, majf=0, minf=2189
IO depths : 1=0.1%, 2=0.1%, 4=0.1%, 8=0.1%, 16=0.1%, 32=100.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.1%, 64=0.0%, >=64=0.0%
issued : total=r=6534465/w=0/d=0, short=r=0/w=0/d=0, drop=r=0/w=0/d=0
latency : target=0, window=0, percentile=100.00%, depth=32
Run status group 0 (all jobs):
READ: io=102101MB, aggrb=580792KB/s, minb=580792KB/s, maxb=580792KB/s, mint=180015msec, maxt=180015msec
Disk stats (read/write):
md0: ios=6510324/6, merge=0/0, ticks=0/0, in_queue=0, util=0.00%, aggrios=1628035/4, aggrmerge=0/0, aggrticks=6150736/8, aggrin_queue=6049822, aggrutil=98.98%
nvme3n1: ios=1626683/4, merge=0/0, ticks=539484/0, in_queue=443320, util=62.10%
nvme2n1: ios=1628699/4, merge=0/0, ticks=21040232/28, in_queue=20923696, util=98.98%
nvme1n1: ios=1628654/6, merge=0/0, ticks=2462604/4, in_queue=2365348, util=68.56%
nvme4n1: ios=1628104/4, merge=0/0, ticks=560624/0, in_queue=466924, util=62.10%
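# # Note: with libaio the random-read test again lands at the provisioned limit (36,299 IOPS vs. the 36,000 IOPS baseline), mirroring the write result.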
# # Without --ioengine=libaio --iodepth=32
sudo fio --directory=/mnt/p_iops_vol0 --name fio_test_file --direct=1 --rw=randread --bs=16k --size=1G --numjobs=16 --time_based --runtime=180 --group_reporting --norandommap
fio_test_file: (groupid=0, jobs=16): err= 0: pid=20053: Thu Aug 8 03:23:43 2019
read : io=98743MB, bw=561735KB/s, iops=35108, runt=180001msec
clat (usec): min=2, max=151427, avg=455.34, stdev=842.88
lat (usec): min=2, max=151427, avg=455.37, stdev=842.88
clat percentiles (usec):
| 1.00th=[ 193], 5.00th=[ 209], 10.00th=[ 217], 20.00th=[ 231],
| 30.00th=[ 249], 40.00th=[ 270], 50.00th=[ 310], 60.00th=[ 378],
| 70.00th=[ 446], 80.00th=[ 564], 90.00th=[ 876], 95.00th=[ 1064],
| 99.00th=[ 1512], 99.50th=[ 2320], 99.90th=[ 6944], 99.95th=[13632],
| 99.99th=[39680]
lat (usec) : 4=0.20%, 10=0.05%, 20=0.01%, 250=30.73%, 500=44.54%
lat (usec) : 750=10.80%, 1000=7.13%
lat (msec) : 2=5.92%, 4=0.44%, 10=0.13%, 20=0.04%, 50=0.03%
lat (msec) : 100=0.01%, 250=0.01%
cpu : usr=0.22%, sys=1.56%, ctx=6304074, majf=0, minf=204
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued : total=r=6319555/w=0/d=0, short=r=0/w=0/d=0, drop=r=0/w=0/d=0
latency : target=0, window=0, percentile=100.00%, depth=1
Run status group 0 (all jobs):
READ: io=98743MB, aggrb=561735KB/s, minb=561735KB/s, maxb=561735KB/s, mint=180001msec, maxt=180001msec
Disk stats (read/write):
md0: ios=6301983/6, merge=0/0, ticks=0/0, in_queue=0, util=0.00%, aggrios=1575993/4, aggrmerge=0/0, aggrticks=707170/0, aggrin_queue=605274, aggrutil=93.88%
nvme3n1: ios=1576322/3, merge=0/0, ticks=738812/0, in_queue=637136, util=92.54%
nvme2n1: ios=1577069/5, merge=0/0, ticks=849500/0, in_queue=746644, util=93.88%
nvme1n1: ios=1574932/6, merge=0/0, ticks=557732/0, in_queue=456096, util=88.98%
nvme4n1: ios=1575650/4, merge=0/0, ticks=682636/0, in_queue=581220, util=91.24%