git://git.camperquake.de
/
zfs.git
/ blobdiff
commit
grep
author
committer
pickaxe
?
search:
re
summary
|
shortlog
|
log
|
commit
|
commitdiff
|
tree
raw
|
inline
| side by side
Use stored whole_disk property when opening a vdev
[zfs.git]
/
module
/
zfs
/
zil.c
diff --git
a/module/zfs/zil.c
b/module/zfs/zil.c
index
c66313f
..
ad11fd6
100644
(file)
--- a/
module/zfs/zil.c
+++ b/
module/zfs/zil.c
@@
-296,6
+296,8
@@
zil_parse(zilog_t *zilog, zil_parse_blk_func_t *parse_blk_func,
char *lrbuf, *lrp;
int error = 0;
char *lrbuf, *lrp;
int error = 0;
+ bzero(&next_blk, sizeof(blkptr_t));
+
/*
* Old logs didn't record the maximum zh_claim_lr_seq.
*/
/*
* Old logs didn't record the maximum zh_claim_lr_seq.
*/
@@
-317,7
+319,7
@@
zil_parse(zilog_t *zilog, zil_parse_blk_func_t *parse_blk_func,
for (blk = zh->zh_log; !BP_IS_HOLE(&blk); blk = next_blk) {
uint64_t blk_seq = blk.blk_cksum.zc_word[ZIL_ZC_SEQ];
int reclen;
for (blk = zh->zh_log; !BP_IS_HOLE(&blk); blk = next_blk) {
uint64_t blk_seq = blk.blk_cksum.zc_word[ZIL_ZC_SEQ];
int reclen;
-	char *end;
+	char *end = NULL;
if (blk_seq > claim_blk_seq)
break;
if (blk_seq > claim_blk_seq)
break;
@@
-1073,7
+1075,7
@@
zil_itx_create(uint64_t txtype, size_t lrsize)
lrsize = P2ROUNDUP_TYPED(lrsize, sizeof (uint64_t), size_t);
lrsize = P2ROUNDUP_TYPED(lrsize, sizeof (uint64_t), size_t);
-	itx = kmem_alloc(offsetof(itx_t, itx_lr) + lrsize, KM_SLEEP);
+	itx = kmem_alloc(offsetof(itx_t, itx_lr) + lrsize, KM_SLEEP|KM_NODEBUG);
itx->itx_lr.lrc_txtype = txtype;
itx->itx_lr.lrc_reclen = lrsize;
itx->itx_sod = lrsize; /* if write & WR_NEED_COPY will be increased */
itx->itx_lr.lrc_txtype = txtype;
itx->itx_lr.lrc_reclen = lrsize;
itx->itx_sod = lrsize; /* if write & WR_NEED_COPY will be increased */
@@
-1299,7
+1301,7
@@
zil_clean(zilog_t *zilog, uint64_t synced_txg)
* created a bad performance problem.
*/
if (taskq_dispatch(zilog->zl_clean_taskq,
* created a bad performance problem.
*/
if (taskq_dispatch(zilog->zl_clean_taskq,
-	    (void (*)(void *))zil_itxg_clean, clean_me, TQ_NOSLEEP) == NULL)
+	    (void (*)(void *))zil_itxg_clean, clean_me, TQ_NOSLEEP) == 0)
zil_itxg_clean(clean_me);
}
zil_itxg_clean(clean_me);
}
@@
-1421,7
+1423,7
@@
zil_commit_writer(zilog_t *zilog)
}
DTRACE_PROBE1(zil__cw1, zilog_t *, zilog);
}
DTRACE_PROBE1(zil__cw1, zilog_t *, zilog);
-	while (itx = list_head(&zilog->zl_itx_commit_list)) {
+	while ((itx = list_head(&zilog->zl_itx_commit_list))) {
txg = itx->itx_lr.lrc_txg;
ASSERT(txg);
txg = itx->itx_lr.lrc_txg;
ASSERT(txg);
@@
-1620,6
+1622,7
@@
zilog_t *
zil_alloc(objset_t *os, zil_header_t *zh_phys)
{
zilog_t *zilog;
zil_alloc(objset_t *os, zil_header_t *zh_phys)
{
zilog_t *zilog;
+ int i;
zilog = kmem_zalloc(sizeof (zilog_t), KM_SLEEP);
zilog = kmem_zalloc(sizeof (zilog_t), KM_SLEEP);
@@
-1634,7
+1637,7
@@
zil_alloc(objset_t *os, zil_header_t *zh_phys)
mutex_init(&zilog->zl_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&zilog->zl_lock, NULL, MUTEX_DEFAULT, NULL);
-	for (int i = 0; i < TXG_SIZE; i++) {
+	for (i = 0; i < TXG_SIZE; i++) {
mutex_init(&zilog->zl_itxg[i].itxg_lock, NULL,
MUTEX_DEFAULT, NULL);
}
mutex_init(&zilog->zl_itxg[i].itxg_lock, NULL,
MUTEX_DEFAULT, NULL);
}
@@
-1662,6
+1665,7
@@
void
zil_free(zilog_t *zilog)
{
lwb_t *head_lwb;
zil_free(zilog_t *zilog)
{
lwb_t *head_lwb;
+ int i;
zilog->zl_stop_sync = 1;
zilog->zl_stop_sync = 1;
@@
-1683,7
+1687,7
@@
zil_free(zilog_t *zilog)
ASSERT(list_is_empty(&zilog->zl_itx_commit_list));
list_destroy(&zilog->zl_itx_commit_list);
ASSERT(list_is_empty(&zilog->zl_itx_commit_list));
list_destroy(&zilog->zl_itx_commit_list);
-	for (int i = 0; i < TXG_SIZE; i++) {
+	for (i = 0; i < TXG_SIZE; i++) {
/*
* It's possible for an itx to be generated that doesn't dirty
* a txg (e.g. ztest TX_TRUNCATE). So there's no zil_clean()
/*
* It's possible for an itx to be generated that doesn't dirty
* a txg (e.g. ztest TX_TRUNCATE). So there's no zil_clean()
@@
-1935,7
+1939,7
@@
zil_replay(objset_t *os, void *arg, zil_replay_func_t *replay_func[TX_MAX_TYPE])
zr.zr_replay = replay_func;
zr.zr_arg = arg;
zr.zr_byteswap = BP_SHOULD_BYTESWAP(&zh->zh_log);
zr.zr_replay = replay_func;
zr.zr_arg = arg;
zr.zr_byteswap = BP_SHOULD_BYTESWAP(&zh->zh_log);
-	zr.zr_lr = kmem_alloc(2 * SPA_MAXBLOCKSIZE, KM_SLEEP);
+	zr.zr_lr = vmem_alloc(2 * SPA_MAXBLOCKSIZE, KM_SLEEP);
/*
* Wait for in-progress removes to sync before starting replay.
/*
* Wait for in-progress removes to sync before starting replay.
@@
-1947,7
+1951,7
@@
zil_replay(objset_t *os, void *arg, zil_replay_func_t *replay_func[TX_MAX_TYPE])
ASSERT(zilog->zl_replay_blks == 0);
(void) zil_parse(zilog, zil_incr_blks, zil_replay_log_record, &zr,
zh->zh_claim_txg);
ASSERT(zilog->zl_replay_blks == 0);
(void) zil_parse(zilog, zil_incr_blks, zil_replay_log_record, &zr,
zh->zh_claim_txg);
-	kmem_free(zr.zr_lr, 2 * SPA_MAXBLOCKSIZE);
+	vmem_free(zr.zr_lr, 2 * SPA_MAXBLOCKSIZE);
zil_destroy(zilog, B_FALSE);
txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);
zil_destroy(zilog, B_FALSE);
txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);