ZFS on high latency devices

This guide assumes familiarity with common ZFS commands and configuration steps.  At a minimum, you should understand how ZFS categorizes I/O and how to use zpool iostat -r, -q and -w.  There is no magic list of parameters to drop in, but rather a procedure to follow so that you can calibrate ZFS to your device.  This process can also be used on local disks to identify bottlenecks and problems with data flow, but the gains may be much less significant.
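For reference, those three iostat views can be sampled on a live pool while a representative workload runs; the pool name tank below is only a placeholder:

<pre>
zpool iostat -r tank 5    # request size histograms per I/O class (shows how well aggregation is working)
zpool iostat -q tank 5    # queue statistics: pending and active I/Os per class
zpool iostat -w tank 5    # total and disk latency histograms
</pre>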
<pre>
# Fill out all non-static values before copying to /etc/modprobe.d/zfs.conf
#
# Disabling the throttle during calibration greatly aids merge
options zfs zio_dva_throttle_enabled=0
# TxG commit every 30 seconds
options zfs zfs_txg_timeout=30
# Start txg commit just before writers ramp up
options zfs zfs_dirty_data_sync = {zfs_dirty_data_max * zfs_async_dirty_min * 0.9}
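# (Worked example with hypothetical values: if zfs_dirty_data_max is 4294967296
# bytes (4 GiB) and zfs_async_dirty_min is taken as a fraction of 0.3, the line
# above works out to roughly 4294967296 * 0.3 * 0.9 = 1159641170 bytes)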
# Save last 100 txg's information
options zfs zfs_txg_history=100
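# (With txg history enabled, per-txg statistics can be read from
# /proc/spl/kstat/zfs/<pool>/txgs)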
#
# 0: IO aggregation
# Limit total agg for very large blocks to blocksize + 64K and read gap to 0.75m
options zfs zfs_vdev_aggregation_limit=blocksize * K * 3
options zfs zfs_vdev_write_gap_limit=ashift * 4 (16k for ashift=12)
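# (Example: ashift=12 means 4096-byte sectors, so the filled-out write gap limit
# above would be 4096 * 4 = 16384)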
#
# 4: Reduce sync_read, async_read and async_write max
# 4a: Reduce async_write_max_active
options zfs zfs_vdev_async_write_max_active=30
# 4b: Reduce async_read_max_active
options zfs zfs_vdev_async_read_max_active=30
# 4c: Reduce sync_read_max_active
options zfs zfs_vdev_sync_read_max_active=30
#
# 5: Raise agg limits
### options zfs zfs_vdev_aggregation_limit=blocksize * K * 3
#
# These are good enough to start with
options zfs zfs_vdev_sync_read_min_active=4
options zfs zfs_vdev_sync_read_max_active=30
options zfs zfs_vdev_async_read_min_active=2
options zfs zfs_vdev_async_write_min_active=2
#
# 6a: Set sync_writes:
options zfs zfs_vdev_sync_write_min_active=10
options zfs zfs_vdev_sync_write_max_active=20
options zfs zfs_vdev_async_read_min_active=2
options zfs zfs_vdev_async_read_max_active=30
options zfs zfs_vdev_async_write_min_active=2
options zfs zfs_vdev_async_write_max_active=30
#
# 6b: Set max threads per vdev
### options zfs zfs_vdev_max_active= SRmax * 1.25
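# (zfs_vdev_max_active caps the total number of I/Os active on each vdev across
# all queue classes; assuming SRmax here refers to the sync_read_max_active value
# of 30 chosen above, 30 * 1.25 rounds to roughly 38)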
#
# 7: Calibrate ZIO throttle
### options zfs zfs_vdev_queue_depth_pct=5000
### options zfs zio_dva_throttle_enabled=1
#
# 8: Recheck!
</pre>
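The values above do not have to be committed blindly: on Linux the same parameters are exposed under /sys/module/zfs/parameters and most can be changed at runtime while watching the iostat views, so each step can be tested against the real workload before it goes into /etc/modprobe.d/zfs.conf. A sketch of that workflow, again using the placeholder pool tank:

<pre>
# Try a candidate value at runtime (does not persist across reboot or module reload)
echo 30 > /sys/module/zfs/parameters/zfs_vdev_async_write_max_active

# Re-check queue depths and aggregation under the real workload
zpool iostat -q tank 5
zpool iostat -r tank 5

# Review recent txg behaviour (available when zfs_txg_history > 0)
cat /proc/spl/kstat/zfs/tank/txgs

# When satisfied, copy the final values into /etc/modprobe.d/zfs.conf
</pre>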

