zfs.git blobdiff (git://git.camperquake.de)
Fix get/set users/groups in quota props via numeric id
diff --git a/module/zfs/vdev.c b/module/zfs/vdev.c
index 4bed646..e0d82e6 100644
--- a/module/zfs/vdev.c
+++ b/module/zfs/vdev.c
@@ -21,6 +21,8 @@
 /*
  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
+ * Copyright (c) 2012 by Delphix. All rights reserved.
  */
 
 #include <sys/zfs_context.h>
@@ -107,7 +109,7 @@ vdev_get_min_asize(vdev_t *vd)
 	vdev_t *pvd = vd->vdev_parent;
 
 	/*
-	 * The our parent is NULL (inactive spare or cache) or is the root,
+	 * If our parent is NULL (inactive spare or cache) or is the root,
 	 * just return our own asize.
 	 */
 	if (pvd == NULL)
@@ -193,7 +195,7 @@ vdev_add_child(vdev_t *pvd, vdev_t *cvd)
 	pvd->vdev_children = MAX(pvd->vdev_children, id + 1);
 	newsize = pvd->vdev_children * sizeof (vdev_t *);
 
-	newchild = kmem_zalloc(newsize, KM_SLEEP);
+	newchild = kmem_zalloc(newsize, KM_PUSHPAGE);
 	if (pvd->vdev_child != NULL) {
 		bcopy(pvd->vdev_child, newchild, oldsize);
 		kmem_free(pvd->vdev_child, oldsize);
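
Note: the KM_SLEEP to KM_PUSHPAGE conversions that recur through this diff all follow one pattern: these allocations can happen while a transaction group is syncing, where a KM_SLEEP allocation could trigger direct reclaim that re-enters ZFS writeback and deadlocks. A minimal sketch of the pattern (this deadlock rationale is the one usually given for such conversions in ZFS on Linux; it is not stated in the diff itself):

	/*
	 * Sketch: on any path reachable from txg sync, allocate with
	 * KM_PUSHPAGE so the SPL allocator will not recurse into the
	 * filesystem under memory pressure.
	 */
	newchild = kmem_zalloc(newsize, KM_PUSHPAGE);	/* was KM_SLEEP */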
@@ -263,7 +265,7 @@ vdev_compact_children(vdev_t *pvd)
 		if (pvd->vdev_child[c])
 			newc++;
 
-	newchild = kmem_alloc(newc * sizeof (vdev_t *), KM_SLEEP);
+	newchild = kmem_alloc(newc * sizeof (vdev_t *), KM_PUSHPAGE);
 
 	for (c = newc = 0; c < oldc; c++) {
 		if ((cvd = pvd->vdev_child[c]) != NULL) {
@@ -286,11 +288,12 @@ vdev_alloc_common(spa_t *spa, uint_t id, uint64_t guid, vdev_ops_t *ops)
 	vdev_t *vd;
 	int t;
 
-	vd = kmem_zalloc(sizeof (vdev_t), KM_SLEEP);
+	vd = kmem_zalloc(sizeof (vdev_t), KM_PUSHPAGE);
 
 	if (spa->spa_root_vdev == NULL) {
 		ASSERT(ops == &vdev_root_ops);
 		spa->spa_root_vdev = vd;
+		spa->spa_load_guid = spa_generate_guid(NULL);
 	}
 
 	if (guid == 0 && ops != &vdev_hole_ops) {
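
Note: spa_load_guid is generated fresh each time a root vdev is allocated, i.e. once per pool load, and unlike the pool guid it is never written to disk. That gives in-core consumers an identifier that stays valid even when the on-disk pool guid changes during a re-guid. A sketch of the distinction (the spa_load_guid() accessor is an assumption from the related upstream work, not shown in this diff):

	uint64_t load_guid = spa_load_guid(spa);	/* in-core, per load */
	uint64_t pool_guid = spa_guid(spa);		/* persistent, on disk */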
@@ -492,7 +495,7 @@ vdev_alloc(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent, uint_t id,
 		    &vd->vdev_removing);
 	}
 
-	if (parent && !parent->vdev_parent) {
+	if (parent && !parent->vdev_parent && alloctype != VDEV_ALLOC_ATTACH) {
 		ASSERT(alloctype == VDEV_ALLOC_LOAD ||
 		    alloctype == VDEV_ALLOC_ADD ||
 		    alloctype == VDEV_ALLOC_SPLIT ||
@@ -669,6 +672,8 @@ vdev_top_transfer(vdev_t *svd, vdev_t *tvd)
 	svd->vdev_ms_shift = 0;
 	svd->vdev_ms_count = 0;
 
+	if (tvd->vdev_mg)
+		ASSERT3P(tvd->vdev_mg, ==, svd->vdev_mg);
 	tvd->vdev_mg = svd->vdev_mg;
 	tvd->vdev_ms = svd->vdev_ms;
@@ -742,6 +747,7 @@ vdev_add_parent(vdev_t *cvd, vdev_ops_t *ops)
 	mvd->vdev_asize = cvd->vdev_asize;
 	mvd->vdev_min_asize = cvd->vdev_min_asize;
+	mvd->vdev_max_asize = cvd->vdev_max_asize;
 	mvd->vdev_ashift = cvd->vdev_ashift;
 	mvd->vdev_state = cvd->vdev_state;
 	mvd->vdev_crtxg = cvd->vdev_crtxg;
@@ -833,7 +839,7 @@ vdev_metaslab_init(vdev_t *vd, uint64_t txg)
 	ASSERT(oldc <= newc);
 
-	mspp = kmem_zalloc(newc * sizeof (*mspp), KM_SLEEP);
+	mspp = kmem_zalloc(newc * sizeof (*mspp), KM_PUSHPAGE | KM_NODEBUG);
 
 	if (oldc != 0) {
 		bcopy(vd->vdev_ms, mspp, oldc * sizeof (*mspp));
@@ -898,6 +904,8 @@ vdev_metaslab_fini(vdev_t *vd)
 		kmem_free(vd->vdev_ms, count * sizeof (metaslab_t *));
 		vd->vdev_ms = NULL;
 	}
+
+	ASSERT3U(vd->vdev_pending_fastwrite, ==, 0);
 }
 
 typedef struct vdev_probe_stats {
@@ -988,7 +996,7 @@ vdev_probe(vdev_t *vd, zio_t *zio)
 	mutex_enter(&vd->vdev_probe_lock);
 
 	if ((pio = vd->vdev_probe_zio) == NULL) {
-		vps = kmem_zalloc(sizeof (*vps), KM_SLEEP);
+		vps = kmem_zalloc(sizeof (*vps), KM_PUSHPAGE);
 
 		vps->vps_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_PROBE |
 		    ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_AGGREGATE |
@@ -1127,7 +1135,8 @@ vdev_open(vdev_t *vd)
 	spa_t *spa = vd->vdev_spa;
 	int error;
 	uint64_t osize = 0;
-	uint64_t asize, psize;
+	uint64_t max_osize = 0;
+	uint64_t asize, max_asize, psize;
 	uint64_t ashift = 0;
 	int c;
@@ -1159,7 +1168,7 @@ vdev_open(vdev_t *vd)
 		return (ENXIO);
 	}
 
-	error = vd->vdev_ops->vdev_op_open(vd, &osize, &ashift);
+	error = vd->vdev_ops->vdev_op_open(vd, &osize, &max_osize, &ashift);
 
 	/*
 	 * Reset the vdev_reopening flag so that we actually close
@@ -1217,6 +1226,7 @@ vdev_open(vdev_t *vd)
 	}
 
 	osize = P2ALIGN(osize, (uint64_t)sizeof (vdev_label_t));
+	max_osize = P2ALIGN(max_osize, (uint64_t)sizeof (vdev_label_t));
 
 	if (vd->vdev_children == 0) {
 		if (osize < SPA_MINDEVSIZE) {
@@ -1226,6 +1236,8 @@ vdev_open(vdev_t *vd)
 		}
 		psize = osize;
 		asize = osize - (VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE);
+		max_asize = max_osize - (VDEV_LABEL_START_SIZE +
+		    VDEV_LABEL_END_SIZE);
 	} else {
 		if (vd->vdev_parent != NULL && osize < SPA_MINDEVSIZE -
 		    (VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE)) {
@@ -1235,6 +1247,7 @@ vdev_open(vdev_t *vd)
 		}
 		psize = 0;
 		asize = osize;
+		max_asize = max_osize;
 	}
 
 	vd->vdev_psize = psize;
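
Note: taken together, the vdev_open() hunks above plumb a second size through the open path. vdev_op_open() now reports both the device's current size (osize) and the maximum size it could offer after an expansion (max_osize); both are aligned to the label size and reduced to allocatable sizes identically. A condensed sketch of the resulting flow, with the surrounding error handling elided:

	error = vd->vdev_ops->vdev_op_open(vd, &osize, &max_osize, &ashift);

	osize = P2ALIGN(osize, (uint64_t)sizeof (vdev_label_t));
	max_osize = P2ALIGN(max_osize, (uint64_t)sizeof (vdev_label_t));

	if (vd->vdev_children == 0) {
		/* leaf vdev: subtract the space reserved for the labels */
		asize = osize - (VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE);
		max_asize = max_osize - (VDEV_LABEL_START_SIZE +
		    VDEV_LABEL_END_SIZE);
	} else {
		/* interior vdev: sizes pass through unchanged */
		asize = osize;
		max_asize = max_osize;
	}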
@@ -1254,16 +1267,21 @@ vdev_open(vdev_t *vd)
 		 * For testing purposes, a higher ashift can be requested.
 		 */
 		vd->vdev_asize = asize;
+		vd->vdev_max_asize = max_asize;
 		vd->vdev_ashift = MAX(ashift, vd->vdev_ashift);
 	} else {
 		/*
-		 * Make sure the alignment requirement hasn't increased.
+		 * Detect if the alignment requirement has increased.
+		 * We don't want to make the pool unavailable, just
+		 * post an event instead.
 		 */
-		if (ashift > vd->vdev_top->vdev_ashift) {
-			vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
-			    VDEV_AUX_BAD_LABEL);
-			return (EINVAL);
+		if (ashift > vd->vdev_top->vdev_ashift &&
+		    vd->vdev_ops->vdev_op_leaf) {
+			zfs_ereport_post(FM_EREPORT_ZFS_DEVICE_BAD_ASHIFT,
+			    spa, vd, NULL, 0, 0);
 		}
+
+		vd->vdev_max_asize = max_asize;
 	}
 
 	/*
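
Note: the behavioral change in the hunk above is that a leaf whose alignment requirement (ashift) has grown since the pool was created no longer fails vdev_open() with EINVAL and a VDEV_STATE_CANT_OPEN / VDEV_AUX_BAD_LABEL state. The open now succeeds and the mismatch is surfaced to userspace as an FMA ereport (FM_EREPORT_ZFS_DEVICE_BAD_ASHIFT), keeping the pool available.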
@@ -1305,13 +1323,18 @@ vdev_open(vdev_t *vd)
  * contents. This needs to be done before vdev_load() so that we don't
  * inadvertently do repair I/Os to the wrong device.
  *
+ * If 'strict' is false ignore the spa guid check. This is necessary because
+ * if the machine crashed during a re-guid the new guid might have been written
+ * to all of the vdev labels, but not the cached config. The strict check
+ * will be performed when the pool is opened again using the mos config.
+ *
  * This function will only return failure if one of the vdevs indicates that it
  * has since been destroyed or exported. This is only possible if
  * /etc/zfs/zpool.cache was readonly at the time. Otherwise, the vdev state
  * will be updated but the function will return 0.
  */
 int
-vdev_validate(vdev_t *vd)
+vdev_validate(vdev_t *vd, boolean_t strict)
 {
 	spa_t *spa = vd->vdev_spa;
 	nvlist_t *label;
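
Note: within this diff only vdev_reopen() gains an updated call (passing B_TRUE); the non-strict call would come from the pool load path in spa.c, which is not part of this blobdiff. A hypothetical caller's-eye sketch of the intended split ('rvd' stands for the pool's root vdev; both call sites are assumptions, not shown here):

	/* First pass, against the possibly stale cached config: be lenient. */
	(void) vdev_validate(rvd, B_FALSE);	/* skip the pool-guid check */

	/* Second pass, once the trusted MOS config is loaded: full check. */
	(void) vdev_validate(rvd, B_TRUE);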
@@ -1320,7 +1343,7 @@ vdev_validate(vdev_t *vd)
 	int c;
 
 	for (c = 0; c < vd->vdev_children; c++)
-		if (vdev_validate(vd->vdev_child[c]) != 0)
+		if (vdev_validate(vd->vdev_child[c], strict) != 0)
 			return (EBADF);
 
 	/*
@@ -1350,8 +1373,9 @@ vdev_validate(vdev_t *vd)
 		return (0);
 	}
 
-	if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_GUID,
-	    &guid) != 0 || guid != spa_guid(spa)) {
+	if (strict && (nvlist_lookup_uint64(label,
+	    ZPOOL_CONFIG_POOL_GUID, &guid) != 0 ||
+	    guid != spa_guid(spa))) {
 		vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
 		    VDEV_AUX_CORRUPT_DATA);
 		nvlist_free(label);
@@ -1514,7 +1538,7 @@ vdev_reopen(vdev_t *vd)
 		    !l2arc_vdev_present(vd))
 			l2arc_add_vdev(spa, vd);
 	} else {
-		(void) vdev_validate(vd);
+		(void) vdev_validate(vd, B_TRUE);
 	}
 
 	/*
@@ -2488,6 +2512,7 @@ vdev_get_stats(vdev_t *vd, vdev_stat_t *vs)
 	vs->vs_rsize = vdev_get_min_asize(vd);
 	if (vd->vdev_ops->vdev_op_leaf)
 		vs->vs_rsize += VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE;
+	vs->vs_esize = vd->vdev_max_asize - vd->vdev_asize;
 	mutex_exit(&vd->vdev_stat_lock);
 
 	/*
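
Note: vs_esize is a new "expandable space" statistic: the gap between what the device could provide after a grow (vdev_max_asize) and what the pool currently uses (vdev_asize). A sketch of a consumer reading it (the reporting side is an assumption, not part of this diff):

	vdev_stat_t vs;

	vdev_get_stats(vd, &vs);
	if (vs.vs_esize != 0) {
		/* the device could be expanded by vs_esize bytes */
	}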
@@ -3053,13 +3078,17 @@ vdev_set_state(vdev_t *vd, boolean_t isopen, vdev_state_t state, vdev_aux_t aux)
 /*
  * Check the vdev configuration to ensure that it's capable of supporting
- * a root pool. Currently, we do not support RAID-Z or partial configuration.
- * In addition, only a single top-level vdev is allowed and none of the leaves
- * can be wholedisks.
+ * a root pool.
  */
 boolean_t
 vdev_is_bootable(vdev_t *vd)
 {
+#if defined(__sun__) || defined(__sun)
+	/*
+	 * Currently, we do not support RAID-Z or partial configuration.
+	 * In addition, only a single top-level vdev is allowed and none of the
+	 * leaves can be wholedisks.
+	 */
 	int c;
 
 	if (!vd->vdev_ops->vdev_op_leaf) {
@@ -3080,6 +3109,7 @@ vdev_is_bootable(vdev_t *vd)
 		if (!vdev_is_bootable(vd->vdev_child[c]))
 			return (B_FALSE);
 	}
+#endif /* __sun__ || __sun */
 	return (B_TRUE);
 }
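
Note: with the new guards, the root-pool restrictions are compiled only on Solaris builds; everywhere else the whole block drops out and the function effectively reduces to the following sketch:

boolean_t
vdev_is_bootable(vdev_t *vd)
{
	return (B_TRUE);
}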
@@ -3174,4 +3204,7 @@ EXPORT_SYMBOL(vdev_degrade);
 EXPORT_SYMBOL(vdev_online);
 EXPORT_SYMBOL(vdev_offline);
 EXPORT_SYMBOL(vdev_clear);
+
+module_param(zfs_scrub_limit, int, 0644);
+MODULE_PARM_DESC(zfs_scrub_limit, "Max scrub/resilver I/O per leaf vdev");
 #endif
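
Note: the module_param() hook makes zfs_scrub_limit a run-time tunable; with standard Linux module_param behavior it would surface as /sys/module/zfs/parameters/zfs_scrub_limit with mode 0644. The variable itself is defined elsewhere in vdev.c; paired with the hook, the declaration looks roughly like this (the default value shown is an assumption, not part of this diff):

	int zfs_scrub_limit = 10;	/* max scrub/resilver I/O per leaf vdev */

	module_param(zfs_scrub_limit, int, 0644);
	MODULE_PARM_DESC(zfs_scrub_limit, "Max scrub/resilver I/O per leaf vdev");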