9198a418f39c8a8a3b0773c211e32c84ac944694
[zfs.git] / scripts / survey.sh
#!/bin/bash
#
# survey.sh - run a series of zpios benchmarks with various ZFS module
# tunings applied, logging each result set under a per-kernel, per-day
# directory for later comparison.

prog=survey.sh
# Pull in common settings (e.g. CMDDIR) shared by the test scripts.
# NOTE(review): this path is relative, so the script must be run from
# its own directory — confirm before relocating.
. ../.script-config

# Log directory keyed by kernel release and date, e.g.
#   /home/<user>/zpios-logs/<kernel>/kpios-YYYYMMDD/
# Use $(...) instead of backticks and quote the result so the path
# survives any unexpected whitespace.
LOG="/home/$(whoami)/zpios-logs/$(uname -r)/kpios-$(date +%Y%m%d)/"
mkdir -p "${LOG}"
8
# Apply all tunings described below to generate some best case
# numbers for what is achievable with some more elbow grease.
# zpios.sh args: <spl opts> <zfs module opts> <zpios opts> <log dir> [post-create cmd]
NAME="prefetch+zerocopy+checksum+pending1024+kmem"
echo "----------------------- ${NAME} ------------------------------"
./zpios.sh                                                           \
        ""                                                           \
        "zfs_prefetch_disable=1 zfs_vdev_max_pending=1024 zio_bulk_flags=0x100" \
        "--zerocopy"                                                 \
        "${LOG}/${NAME}/"                                            \
        "${CMDDIR}/zfs/zfs set checksum=off lustre" |                \
        tee "${LOG}/${NAME}.txt"
20
# Baseline number for an out of the box config with no manual tuning.
# Ideally, we will want things to be automatically tuned and for this
# number to approach the tweaked out results above.
NAME="baseline"
echo "----------------------- ${NAME} ------------------------------"
./zpios.sh                                                           \
        ""                                                           \
        ""                                                           \
        ""                                                           \
        "${LOG}/${NAME}/" |                                          \
        tee "${LOG}/${NAME}.txt"
32
# Disable ZFS's prefetching.  For some reason, still not clear to me,
# the current prefetching policy is quite bad for a random workload.
# Allowing the algorithm to detect a random workload and then do
# nothing may be the way to address this issue.
NAME="prefetch"
echo "----------------------- ${NAME} ------------------------------"
./zpios.sh                                                           \
        ""                                                           \
        "zfs_prefetch_disable=1"                                     \
        ""                                                           \
        "${LOG}/${NAME}/" |                                          \
        tee "${LOG}/${NAME}.txt"
45
# As expected, simulating a zerocopy IO path improves performance
# by freeing up lots of CPU which is wasted moving data between buffers.
NAME="zerocopy"
echo "----------------------- ${NAME} ------------------------------"
./zpios.sh                                                           \
        ""                                                           \
        ""                                                           \
        "--zerocopy"                                                 \
        "${LOG}/${NAME}/" |                                          \
        tee "${LOG}/${NAME}.txt"
56
# Disabling checksumming should show some (if small) improvement
# simply due to freeing up a modest amount of CPU.
NAME="checksum"
echo "----------------------- ${NAME} ------------------------------"
./zpios.sh                                                           \
        ""                                                           \
        ""                                                           \
        ""                                                           \
        "${LOG}/${NAME}/"                                            \
        "${CMDDIR}/zfs/zfs set checksum=off lustre" |                \
        tee "${LOG}/${NAME}.txt"
68
# Increasing the pending IO depth also seems to improve things, likely
# at the expense of latency.  This should be explored more because I'm
# seeing a much bigger impact there than I would have expected.  There
# may be some low hanging fruit to be found here.
NAME="pending"
echo "----------------------- ${NAME} ------------------------------"
./zpios.sh                                                           \
        ""                                                           \
        "zfs_vdev_max_pending=1024"                                  \
        ""                                                           \
        "${LOG}/${NAME}/" |                                          \
        tee "${LOG}/${NAME}.txt"
81
# To avoid memory fragmentation issues our slab implementation can be
# based on a virtual address space.  Interestingly, we take a pretty
# substantial performance penalty for this somewhere in the low level
# IO drivers.  If we back the slab with kmem pages we see far better
# read performance numbers at the cost of memory fragmentation and
# general system instability due to large allocations.  This may be
# because of an optimization in the low level drivers due to the
# contiguous kmem based memory.  This needs to be explained.  The good
# news here is that with zerocopy interfaces added at the DMU layer we
# could guarantee kmem based memory for a pool of pages.
#
# 0x100 = KMC_KMEM - Force kmem_* based slab
# 0x200 = KMC_VMEM - Force vmem_* based slab
NAME="kmem"
echo "----------------------- ${NAME} ------------------------------"
./zpios.sh                                                           \
        ""                                                           \
        "zio_bulk_flags=0x100"                                       \
        ""                                                           \
        "${LOG}/${NAME}/" |                                          \
        tee "${LOG}/${NAME}.txt"