+space_map_ops_t *zfs_metaslab_ops = &metaslab_ff_ops;
+#endif /* WITH_FF_BLOCK_ALLOCATOR */
+
+#if defined(WITH_DF_BLOCK_ALLOCATOR)
+/*
+ * ==========================================================================
+ * Dynamic block allocator -
+ * Uses the first-fit allocation scheme until space gets low, then
+ * switches to a best-fit allocation method. Uses metaslab_df_alloc_threshold
+ * and metaslab_df_free_pct to determine when to switch the allocation scheme.
+ * ==========================================================================
+ */
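+/*
+ * One allocation cursor per power-of-two alignment class is kept in
+ * sm->sm_ppd (indexed by highbit(size & -size)), so first-fit scans for
+ * a given alignment resume where the previous allocation left off.
+ */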
+static uint64_t
+metaslab_df_alloc(space_map_t *sm, uint64_t size)
+{
+ avl_tree_t *t = &sm->sm_root;
+ uint64_t align = size & -size;
+ uint64_t *cursor = (uint64_t *)sm->sm_ppd + highbit(align) - 1;
+ uint64_t max_size = metaslab_pp_maxsize(sm);
+ int free_pct = sm->sm_space * 100 / sm->sm_size;
+
+ ASSERT(MUTEX_HELD(sm->sm_lock));
+ ASSERT3U(avl_numnodes(&sm->sm_root), ==, avl_numnodes(sm->sm_pp_root));
+
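+	/* Fail fast when even the largest free segment cannot hold the request. */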
+ if (max_size < size)
+ return (-1ULL);
+
+ /*
+	 * If we're running low on space, switch to using the
+	 * size-sorted AVL tree (best-fit).
+ */
+ if (max_size < metaslab_df_alloc_threshold ||
+ free_pct < metaslab_df_free_pct) {
+ t = sm->sm_pp_root;
+ *cursor = 0;
+ }
+
+ return (metaslab_block_picker(t, cursor, size, 1ULL));
+}
+
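+/*
+ * The map is reported as fragmented once the largest free segment drops
+ * below metaslab_df_alloc_threshold or free space falls below
+ * metaslab_df_free_pct percent; these are the same conditions under
+ * which metaslab_df_alloc() above switches to best-fit.
+ */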
+static boolean_t
+metaslab_df_fragmented(space_map_t *sm)
+{
+ uint64_t max_size = metaslab_pp_maxsize(sm);
+ int free_pct = sm->sm_space * 100 / sm->sm_size;
+
+ if (max_size >= metaslab_df_alloc_threshold &&
+ free_pct >= metaslab_df_free_pct)
+ return (B_FALSE);
+
+ return (B_TRUE);
+}
+
+static space_map_ops_t metaslab_df_ops = {
+ metaslab_pp_load,
+ metaslab_pp_unload,
+ metaslab_df_alloc,
+ metaslab_pp_claim,
+ metaslab_pp_free,
+ metaslab_pp_maxsize,
+ metaslab_df_fragmented
+};
+
+space_map_ops_t *zfs_metaslab_ops = &metaslab_df_ops;
+#endif /* WITH_DF_BLOCK_ALLOCATOR */
+
+/*
+ * ==========================================================================
+ * Other experimental allocators
+ * ==========================================================================
+ */
+#if defined(WITH_CDF_BLOCK_ALLOCATOR)
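+/*
+ * The CDF allocator stores two words in sm->sm_ppd: a cursor and the
+ * end of the extent currently being carved up.  Requests are satisfied
+ * sequentially from that extent until it is exhausted, then a
+ * replacement extent is taken from the size-sorted tree.
+ */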
+static uint64_t
+metaslab_cdf_alloc(space_map_t *sm, uint64_t size)
+{
+ avl_tree_t *t = &sm->sm_root;
+ uint64_t *cursor = (uint64_t *)sm->sm_ppd;
+ uint64_t *extent_end = (uint64_t *)sm->sm_ppd + 1;
+ uint64_t max_size = metaslab_pp_maxsize(sm);
+ uint64_t rsize = size;
+ uint64_t offset = 0;
+
+ ASSERT(MUTEX_HELD(sm->sm_lock));
+ ASSERT3U(avl_numnodes(&sm->sm_root), ==, avl_numnodes(sm->sm_pp_root));
+
+ if (max_size < size)
+ return (-1ULL);
+
+ ASSERT3U(*extent_end, >=, *cursor);
+
+ /*
+	 * If the current extent can no longer satisfy the request,
+	 * switch to the size-sorted AVL tree (best-fit) and start a
+	 * new extent.
+ */
+ if ((*cursor + size) > *extent_end) {
+ t = sm->sm_pp_root;
+ *cursor = *extent_end = 0;
+
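+		/*
+		 * While large segments remain, grab an extent of at least
+		 * metaslab_min_alloc_size so several subsequent requests
+		 * can be carved from the same extent.
+		 */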
+ if (max_size > 2 * SPA_MAXBLOCKSIZE)
+ rsize = MIN(metaslab_min_alloc_size, max_size);
+ offset = metaslab_block_picker(t, extent_end, rsize, 1ULL);
+		if (offset != -1ULL)
+ *cursor = offset + size;
+ } else {
+ offset = metaslab_block_picker(t, cursor, rsize, 1ULL);
+ }
+ ASSERT3U(*cursor, <=, *extent_end);
+ return (offset);
+}
+
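+/*
+ * The map is considered unfragmented while a free segment larger than
+ * ten times metaslab_min_alloc_size remains.
+ */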
+static boolean_t
+metaslab_cdf_fragmented(space_map_t *sm)
+{
+ uint64_t max_size = metaslab_pp_maxsize(sm);
+
+ if (max_size > (metaslab_min_alloc_size * 10))
+ return (B_FALSE);
+ return (B_TRUE);
+}
+
+static space_map_ops_t metaslab_cdf_ops = {
+ metaslab_pp_load,
+ metaslab_pp_unload,
+ metaslab_cdf_alloc,
+ metaslab_pp_claim,
+ metaslab_pp_free,
+ metaslab_pp_maxsize,
+ metaslab_cdf_fragmented
+};
+
+space_map_ops_t *zfs_metaslab_ops = &metaslab_cdf_ops;
+#endif /* WITH_CDF_BLOCK_ALLOCATOR */
+
+#if defined(WITH_NDF_BLOCK_ALLOCATOR)
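+/*
+ * The NDF allocator keeps one cursor per power-of-two size class in
+ * sm->sm_ppd (indexed by highbit(size)).  When the segment at a class's
+ * cursor is exhausted, a replacement segment of at least
+ * MIN(max_size, 1 << (highbit(size) + metaslab_ndf_clump_shift)) bytes
+ * is taken from the size-sorted tree, so allocations of similar size
+ * tend to clump together.
+ */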
+uint64_t metaslab_ndf_clump_shift = 4;
+
+static uint64_t
+metaslab_ndf_alloc(space_map_t *sm, uint64_t size)
+{
+ avl_tree_t *t = &sm->sm_root;
+ avl_index_t where;
+ space_seg_t *ss, ssearch;
+ uint64_t hbit = highbit(size);
+ uint64_t *cursor = (uint64_t *)sm->sm_ppd + hbit - 1;
+ uint64_t max_size = metaslab_pp_maxsize(sm);
+
+ ASSERT(MUTEX_HELD(sm->sm_lock));
+ ASSERT3U(avl_numnodes(&sm->sm_root), ==, avl_numnodes(sm->sm_pp_root));
+
+ if (max_size < size)
+ return (-1ULL);
+
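+	/*
+	 * Check whether the segment at this size class's cursor can
+	 * still hold the request.
+	 */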
+ ssearch.ss_start = *cursor;
+ ssearch.ss_end = *cursor + size;
+
+ ss = avl_find(t, &ssearch, &where);
+ if (ss == NULL || (ss->ss_start + size > ss->ss_end)) {
+ t = sm->sm_pp_root;
+
+ ssearch.ss_start = 0;
+ ssearch.ss_end = MIN(max_size,
+ 1ULL << (hbit + metaslab_ndf_clump_shift));
+ ss = avl_find(t, &ssearch, &where);
+ if (ss == NULL)
+ ss = avl_nearest(t, where, AVL_AFTER);
+ ASSERT(ss != NULL);
+ }
+
+ if (ss != NULL) {
+ if (ss->ss_start + size <= ss->ss_end) {
+ *cursor = ss->ss_start + size;
+ return (ss->ss_start);
+ }
+ }
+ return (-1ULL);
+}
+
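+/*
+ * The map is considered unfragmented while a free segment larger than
+ * metaslab_min_alloc_size << metaslab_ndf_clump_shift remains.
+ */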
+static boolean_t
+metaslab_ndf_fragmented(space_map_t *sm)
+{
+ uint64_t max_size = metaslab_pp_maxsize(sm);
+
+ if (max_size > (metaslab_min_alloc_size << metaslab_ndf_clump_shift))
+ return (B_FALSE);
+ return (B_TRUE);
+}
+
+static space_map_ops_t metaslab_ndf_ops = {
+ metaslab_pp_load,
+ metaslab_pp_unload,
+ metaslab_ndf_alloc,
+ metaslab_pp_claim,
+ metaslab_pp_free,
+ metaslab_pp_maxsize,
+ metaslab_ndf_fragmented
+};
+
+space_map_ops_t *zfs_metaslab_ops = &metaslab_ndf_ops;
+#endif /* WITH_NDF_BLOCK_ALLOCATOR */
+