Merge branch 'linux-next' of git://git.infradead.org/ubifs-2.6

* 'linux-next' of git://git.infradead.org/ubifs-2.6:
  UBIFS: fix-up free space earlier
  UBIFS: intialize LPT earlier
  UBIFS: assert no fixup when writing a node
  UBIFS: fix clean znode counter corruption in error cases
  UBIFS: fix memory leak on error path
  UBIFS: fix shrinker object count reports
  UBIFS: fix recovery broken by the previous recovery fix
  UBIFS: amend ubifs_recover_leb interface
  UBIFS: introduce a "grouped" journal head flag
  UBIFS: supress false error messages
commit 3af91a1256
fs/ubifs/io.c

@@ -581,6 +581,7 @@ int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len)
         ubifs_assert(wbuf->size % c->min_io_size == 0);
         ubifs_assert(mutex_is_locked(&wbuf->io_mutex));
         ubifs_assert(!c->ro_media && !c->ro_mount);
+        ubifs_assert(!c->space_fixup);
         if (c->leb_size - wbuf->offs >= c->max_write_size)
                 ubifs_assert(!((wbuf->offs + wbuf->size) % c->max_write_size));
 
@@ -759,6 +760,7 @@ int ubifs_write_node(struct ubifs_info *c, void *buf, int len, int lnum,
         ubifs_assert(lnum >= 0 && lnum < c->leb_cnt && offs >= 0);
         ubifs_assert(offs % c->min_io_size == 0 && offs < c->leb_size);
         ubifs_assert(!c->ro_media && !c->ro_mount);
+        ubifs_assert(!c->space_fixup);
 
         if (c->ro_error)
                 return -EROFS;
fs/ubifs/journal.c

@@ -669,6 +669,7 @@ int ubifs_jnl_update(struct ubifs_info *c, const struct inode *dir,
 
 out_release:
         release_head(c, BASEHD);
+        kfree(dent);
 out_ro:
         ubifs_ro_mode(c, err);
         if (last_reference)
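The kfree(dent) added before out_ro is the memory-leak fix: dent is allocated near the top of ubifs_jnl_update() and was leaked whenever the function bailed out through out_release. A minimal userspace sketch of the same goto-unwind pattern (reserve_space(), release_space() and journal_update() are invented stand-ins, not UBIFS APIs):

#include <errno.h>
#include <stdlib.h>
#include <string.h>

static int reserve_space(void)  { return -ENOSPC; } /* pretend this fails */
static void release_space(void) { }

static int journal_update(const char *name)
{
        int err;
        char *dent = malloc(strlen(name) + 1); /* allocated early, like dent */

        if (!dent)
                return -ENOMEM;

        err = reserve_space();
        if (err)
                goto out_release;

        /* ... write the directory entry ... */
        free(dent);
        return 0;

out_release:
        release_space();
        free(dent); /* the fix: the error path must free dent too */
        return err;
}

int main(void) { journal_update("foo"); return 0; }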
fs/ubifs/orphan.c

@@ -674,7 +674,7 @@ static int kill_orphans(struct ubifs_info *c)
                 if (IS_ERR(sleb)) {
                         if (PTR_ERR(sleb) == -EUCLEAN)
                                 sleb = ubifs_recover_leb(c, lnum, 0,
-                                                         c->sbuf, 0);
+                                                         c->sbuf, -1);
                         if (IS_ERR(sleb)) {
                                 err = PTR_ERR(sleb);
                                 break;
fs/ubifs/recovery.c

@@ -564,19 +564,15 @@ static int fix_unclean_leb(struct ubifs_info *c, struct ubifs_scan_leb *sleb,
 }
 
 /**
- * drop_last_node - drop the last node or group of nodes.
+ * drop_last_group - drop the last group of nodes.
  * @sleb: scanned LEB information
  * @offs: offset of dropped nodes is returned here
- * @grouped: non-zero if whole group of nodes have to be dropped
  *
  * This is a helper function for 'ubifs_recover_leb()' which drops the last
- * node of the scanned LEB or the last group of nodes if @grouped is not zero.
- * This function returns %1 if a node was dropped and %0 otherwise.
+ * group of nodes of the scanned LEB.
  */
-static int drop_last_node(struct ubifs_scan_leb *sleb, int *offs, int grouped)
+static void drop_last_group(struct ubifs_scan_leb *sleb, int *offs)
 {
-        int dropped = 0;
-
         while (!list_empty(&sleb->nodes)) {
                 struct ubifs_scan_node *snod;
                 struct ubifs_ch *ch;
@@ -585,17 +581,40 @@ static int drop_last_node(struct ubifs_scan_leb *sleb, int *offs, int grouped)
                           list);
                 ch = snod->node;
                 if (ch->group_type != UBIFS_IN_NODE_GROUP)
-                        return dropped;
-                dbg_rcvry("dropping node at %d:%d", sleb->lnum, snod->offs);
+                        break;
+
+                dbg_rcvry("dropping grouped node at %d:%d",
+                          sleb->lnum, snod->offs);
+                *offs = snod->offs;
+                list_del(&snod->list);
+                kfree(snod);
+                sleb->nodes_cnt -= 1;
+        }
+}
+
+/**
+ * drop_last_node - drop the last node.
+ * @sleb: scanned LEB information
+ * @offs: offset of dropped nodes is returned here
+ * @grouped: non-zero if whole group of nodes have to be dropped
+ *
+ * This is a helper function for 'ubifs_recover_leb()' which drops the last
+ * node of the scanned LEB.
+ */
+static void drop_last_node(struct ubifs_scan_leb *sleb, int *offs)
+{
+        struct ubifs_scan_node *snod;
+
+        if (!list_empty(&sleb->nodes)) {
+                snod = list_entry(sleb->nodes.prev, struct ubifs_scan_node,
+                                  list);
+
+                dbg_rcvry("dropping last node at %d:%d", sleb->lnum, snod->offs);
                 *offs = snod->offs;
                 list_del(&snod->list);
                 kfree(snod);
                 sleb->nodes_cnt -= 1;
-                dropped = 1;
-                if (!grouped)
-                        break;
         }
-        return dropped;
 }
 
 /**
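drop_last_group() walks the scanned-node list backwards and keeps dropping nodes while they carry the in-group flag, pulling the LEB's end offset back to the start of each dropped node. A toy, runnable model of the same loop using an array instead of the kernel's list_head (all names invented):

#include <stdio.h>

struct node { int offs; int in_group; };

/* Drop trailing nodes that are part of an (incomplete) group, mirroring
 * drop_last_group(): returns the new node count, reports the offset where
 * the LEB now ends. */
static int drop_last_group(struct node *nodes, int cnt, int *offs)
{
        while (cnt > 0 && nodes[cnt - 1].in_group) {
                *offs = nodes[cnt - 1].offs; /* LEB now ends here */
                cnt--;                       /* "kfree" the scan node */
        }
        return cnt;
}

int main(void)
{
        struct node nodes[] = { {0, 0}, {128, 1}, {256, 1} };
        int offs = 512;
        int cnt = drop_last_group(nodes, 3, &offs);

        printf("kept %d nodes, LEB ends at %d\n", cnt, offs); /* 1, 128 */
        return 0;
}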
@@ -604,7 +623,8 @@ static int drop_last_node(struct ubifs_scan_leb *sleb, int *offs, int grouped)
  * @lnum: LEB number
  * @offs: offset
  * @sbuf: LEB-sized buffer to use
- * @grouped: nodes may be grouped for recovery
+ * @jhead: journal head number this LEB belongs to (%-1 if the LEB does not
+ *         belong to any journal head)
  *
  * This function does a scan of a LEB, but caters for errors that might have
  * been caused by the unclean unmount from which we are attempting to recover.
@@ -612,13 +632,14 @@ static int drop_last_node(struct ubifs_scan_leb *sleb, int *offs, int grouped)
  * found, and a negative error code in case of failure.
  */
 struct ubifs_scan_leb *ubifs_recover_leb(struct ubifs_info *c, int lnum,
-                                         int offs, void *sbuf, int grouped)
+                                         int offs, void *sbuf, int jhead)
 {
         int ret = 0, err, len = c->leb_size - offs, start = offs, min_io_unit;
+        int grouped = jhead == -1 ? 0 : c->jheads[jhead].grouped;
         struct ubifs_scan_leb *sleb;
         void *buf = sbuf + offs;
 
-        dbg_rcvry("%d:%d", lnum, offs);
+        dbg_rcvry("%d:%d, jhead %d, grouped %d", lnum, offs, jhead, grouped);
 
         sleb = ubifs_start_scan(c, lnum, offs, sbuf);
         if (IS_ERR(sleb))
@@ -635,7 +656,7 @@ struct ubifs_scan_leb *ubifs_recover_leb(struct ubifs_info *c, int lnum,
                  * Scan quietly until there is an error from which we cannot
                  * recover
                  */
-                ret = ubifs_scan_a_node(c, buf, len, lnum, offs, 0);
+                ret = ubifs_scan_a_node(c, buf, len, lnum, offs, 1);
                 if (ret == SCANNED_A_NODE) {
                         /* A valid node, and not a padding node */
                         struct ubifs_ch *ch = buf;
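With the amended interface, callers pass a journal head number instead of a precomputed "grouped" flag, and ubifs_recover_leb() looks the flag up itself; %-1 means the LEB belongs to no journal head. A minimal sketch of that mapping (NR_JHEADS, the jheads table contents and the GCHD value here are invented for illustration, not quoted from ubifs.h):

#include <stdio.h>

#define NR_JHEADS 3
#define GCHD      2 /* sketch value; the real constant lives in ubifs.h */

struct jhead { int grouped; };
static struct jhead jheads[NR_JHEADS] = { {1}, {1}, {0} }; /* GC head: not grouped */

/* The jhead -> grouped mapping ubifs_recover_leb() now performs itself:
 * -1 means the LEB belongs to no journal head, so nodes are not grouped. */
static int is_grouped(int jhead)
{
        return jhead == -1 ? 0 : jheads[jhead].grouped;
}

int main(void)
{
        printf("no head: %d, head 0: %d, GC head: %d\n",
               is_grouped(-1), is_grouped(0), is_grouped(GCHD));
        return 0;
}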
@@ -695,59 +716,62 @@ struct ubifs_scan_leb *ubifs_recover_leb(struct ubifs_info *c, int lnum,
          * If nodes are grouped, always drop the incomplete group at
          * the end.
          */
-        drop_last_node(sleb, &offs, 1);
+        drop_last_group(sleb, &offs);
 
-        /*
-         * While we are in the middle of the same min. I/O unit keep dropping
-         * nodes. So basically, what we want is to make sure that the last min.
-         * I/O unit where we saw the corruption is dropped completely with all
-         * the uncorrupted node which may possibly sit there.
-         *
-         * In other words, let's name the min. I/O unit where the corruption
-         * starts B, and the previous min. I/O unit A. The below code tries to
-         * deal with a situation when half of B contains valid nodes or the end
-         * of a valid node, and the second half of B contains corrupted data or
-         * garbage. This means that UBIFS had been writing to B just before the
-         * power cut happened. I do not know how realistic is this scenario
-         * that half of the min. I/O unit had been written successfully and the
-         * other half not, but this is possible in our 'failure mode emulation'
-         * infrastructure at least.
-         *
-         * So what is the problem, why we need to drop those nodes? Whey can't
-         * we just clean-up the second half of B by putting a padding node
-         * there? We can, and this works fine with one exception which was
-         * reproduced with power cut emulation testing and happens extremely
-         * rarely. The description follows, but it is worth noting that that is
-         * only about the GC head, so we could do this trick only if the bud
-         * belongs to the GC head, but it does not seem to be worth an
-         * additional "if" statement.
-         *
-         * So, imagine the file-system is full, we run GC which is moving valid
-         * nodes from LEB X to LEB Y (obviously, LEB Y is the current GC head
-         * LEB). The @c->gc_lnum is -1, which means that GC will retain LEB X
-         * and will try to continue. Imagine that LEB X is currently the
-         * dirtiest LEB, and the amount of used space in LEB Y is exactly the
-         * same as amount of free space in LEB X.
-         *
-         * And a power cut happens when nodes are moved from LEB X to LEB Y. We
-         * are here trying to recover LEB Y which is the GC head LEB. We find
-         * the min. I/O unit B as described above. Then we clean-up LEB Y by
-         * padding min. I/O unit. And later 'ubifs_rcvry_gc_commit()' function
-         * fails, because it cannot find a dirty LEB which could be GC'd into
-         * LEB Y! Even LEB X does not match because the amount of valid nodes
-         * there does not fit the free space in LEB Y any more! And this is
-         * because of the padding node which we added to LEB Y. The
-         * user-visible effect of this which I once observed and analysed is
-         * that we cannot mount the file-system with -ENOSPC error.
-         *
-         * So obviously, to make sure that situation does not happen we should
-         * free min. I/O unit B in LEB Y completely and the last used min. I/O
-         * unit in LEB Y should be A. This is basically what the below code
-         * tries to do.
-         */
-        while (min_io_unit == round_down(offs, c->min_io_size) &&
-               min_io_unit != offs &&
-               drop_last_node(sleb, &offs, grouped));
+        if (jhead == GCHD) {
+                /*
+                 * If this LEB belongs to the GC head then while we are in the
+                 * middle of the same min. I/O unit keep dropping nodes. So
+                 * basically, what we want is to make sure that the last min.
+                 * I/O unit where we saw the corruption is dropped completely
+                 * with all the uncorrupted nodes which may possibly sit there.
+                 *
+                 * In other words, let's name the min. I/O unit where the
+                 * corruption starts B, and the previous min. I/O unit A. The
+                 * below code tries to deal with a situation when half of B
+                 * contains valid nodes or the end of a valid node, and the
+                 * second half of B contains corrupted data or garbage. This
+                 * means that UBIFS had been writing to B just before the power
+                 * cut happened. I do not know how realistic is this scenario
+                 * that half of the min. I/O unit had been written successfully
+                 * and the other half not, but this is possible in our 'failure
+                 * mode emulation' infrastructure at least.
+                 *
+                 * So what is the problem, why we need to drop those nodes? Why
+                 * can't we just clean-up the second half of B by putting a
+                 * padding node there? We can, and this works fine with one
+                 * exception which was reproduced with power cut emulation
+                 * testing and happens extremely rarely.
+                 *
+                 * Imagine the file-system is full, we run GC which starts
+                 * moving valid nodes from LEB X to LEB Y (obviously, LEB Y is
+                 * the current GC head LEB). The @c->gc_lnum is -1, which means
+                 * that GC will retain LEB X and will try to continue. Imagine
+                 * that LEB X is currently the dirtiest LEB, and the amount of
+                 * used space in LEB Y is exactly the same as amount of free
+                 * space in LEB X.
+                 *
+                 * And a power cut happens when nodes are moved from LEB X to
+                 * LEB Y. We are here trying to recover LEB Y which is the GC
+                 * head LEB. We find the min. I/O unit B as described above.
+                 * Then we clean-up LEB Y by padding min. I/O unit. And later
+                 * 'ubifs_rcvry_gc_commit()' function fails, because it cannot
+                 * find a dirty LEB which could be GC'd into LEB Y! Even LEB X
+                 * does not match because the amount of valid nodes there does
+                 * not fit the free space in LEB Y any more! And this is
+                 * because of the padding node which we added to LEB Y. The
+                 * user-visible effect of this which I once observed and
+                 * analysed is that we cannot mount the file-system with
+                 * -ENOSPC error.
+                 *
+                 * So obviously, to make sure that situation does not happen we
+                 * should free min. I/O unit B in LEB Y completely and the last
+                 * used min. I/O unit in LEB Y should be A. This is basically
+                 * what the below code tries to do.
+                 */
+                while (offs > min_io_unit)
+                        drop_last_node(sleb, &offs);
+        }
 
         buf = sbuf + offs;
         len = c->leb_size - offs;
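The GCHD-only branch relies on min_io_unit, computed earlier as round_down(offs, c->min_io_size): the loop drops trailing nodes until the LEB no longer ends inside the min. I/O unit where the corruption was seen, leaving that unit completely free. A small runnable model of that arithmetic (the offsets and sizes are invented):

#include <stdio.h>

#define round_down(x, y) ((x) & ~((y) - 1)) /* power-of-two alignment */

int main(void)
{
        int min_io_size = 512;                          /* assumed min. I/O unit size */
        int node_offs[] = { 0, 256, 768, 1024, 1100 };  /* node start offsets */
        int cnt = 5, offs = 1150;                       /* corruption first seen at 1150 */
        int min_io_unit = round_down(offs, min_io_size); /* unit B starts at 1024 */

        /* Mirror the new loop: keep dropping trailing nodes while the LEB
         * still ends inside unit B, so the last used unit becomes A. */
        while (offs > min_io_unit && cnt > 0)
                offs = node_offs[--cnt]; /* drop_last_node(): end moves back */

        printf("LEB now ends at %d\n", offs); /* prints 1024: unit B is empty */
        return 0;
}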
@@ -881,7 +905,7 @@ struct ubifs_scan_leb *ubifs_recover_log_leb(struct ubifs_info *c, int lnum,
                 }
                 ubifs_scan_destroy(sleb);
         }
-        return ubifs_recover_leb(c, lnum, offs, sbuf, 0);
+        return ubifs_recover_leb(c, lnum, offs, sbuf, -1);
 }
 
 /**
fs/ubifs/replay.c

@@ -557,8 +557,7 @@ static int replay_bud(struct ubifs_info *c, struct bud_entry *b)
                  * these LEBs could possibly be written to at the power cut
                  * time.
                  */
-                sleb = ubifs_recover_leb(c, lnum, offs, c->sbuf,
-                                         b->bud->jhead != GCHD);
+                sleb = ubifs_recover_leb(c, lnum, offs, c->sbuf, b->bud->jhead);
         else
                 sleb = ubifs_scan(c, lnum, offs, c->sbuf, 0);
         if (IS_ERR(sleb))
fs/ubifs/shrinker.c

@@ -284,7 +284,11 @@ int ubifs_shrinker(struct shrinker *shrink, struct shrink_control *sc)
         long clean_zn_cnt = atomic_long_read(&ubifs_clean_zn_cnt);
 
         if (nr == 0)
-                return clean_zn_cnt;
+                /*
+                 * Due to the way UBIFS updates the clean znode counter it may
+                 * temporarily be negative.
+                 */
+                return clean_zn_cnt >= 0 ? clean_zn_cnt : 1;
 
         if (!clean_zn_cnt) {
                 /*
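This is the "fix shrinker object count reports" change: as the new comment says, ubifs_clean_zn_cnt can dip below zero transiently, and a negative object count must never be reported back to the VM. A minimal sketch of the clamping (report_count() is an invented name):

#include <stdio.h>

/* Toy model of the fixed count path: clamp a transiently-negative
 * counter before reporting it. */
static long report_count(long clean_zn_cnt)
{
        return clean_zn_cnt >= 0 ? clean_zn_cnt : 1;
}

int main(void)
{
        printf("%ld %ld %ld\n",
               report_count(42), report_count(0), report_count(-3));
        /* prints: 42 0 1 */
        return 0;
}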
fs/ubifs/super.c

@@ -811,15 +811,18 @@ static int alloc_wbufs(struct ubifs_info *c)
 
                 c->jheads[i].wbuf.sync_callback = &bud_wbuf_callback;
                 c->jheads[i].wbuf.jhead = i;
+                c->jheads[i].grouped = 1;
         }
 
         c->jheads[BASEHD].wbuf.dtype = UBI_SHORTTERM;
         /*
          * Garbage Collector head likely contains long-term data and
-         * does not need to be synchronized by timer.
+         * does not need to be synchronized by timer. Also GC head nodes are
+         * not grouped.
          */
         c->jheads[GCHD].wbuf.dtype = UBI_LONGTERM;
         c->jheads[GCHD].wbuf.no_timer = 1;
+        c->jheads[GCHD].grouped = 0;
 
         return 0;
 }
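alloc_wbufs() now records the grouping policy per journal head: every head groups nodes by default, and only the GC head opts out, matching the comment above. A minimal sketch of that initialization (NR_JHEADS and the GCHD index are assumed values for illustration, not quoted from ubifs.h):

#include <stdio.h>

#define NR_JHEADS 3
#define GCHD      0 /* assumed index for the sketch */

struct jhead { unsigned int grouped:1; };

int main(void)
{
        struct jhead jheads[NR_JHEADS];
        int i;

        /* Default: nodes written to a journal head are grouped ... */
        for (i = 0; i < NR_JHEADS; i++)
                jheads[i].grouped = 1;
        /* ... except the GC head, which copies nodes one at a time. */
        jheads[GCHD].grouped = 0;

        for (i = 0; i < NR_JHEADS; i++)
                printf("head %d grouped=%d\n", i, (int)jheads[i].grouped);
        return 0;
}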
@@ -1284,12 +1287,25 @@ static int mount_ubifs(struct ubifs_info *c)
         if ((c->mst_node->flags & cpu_to_le32(UBIFS_MST_DIRTY)) != 0) {
                 ubifs_msg("recovery needed");
                 c->need_recovery = 1;
-                if (!c->ro_mount) {
-                        err = ubifs_recover_inl_heads(c, c->sbuf);
-                        if (err)
-                                goto out_master;
-                }
-        } else if (!c->ro_mount) {
+        }
+
+        if (c->need_recovery && !c->ro_mount) {
+                err = ubifs_recover_inl_heads(c, c->sbuf);
+                if (err)
+                        goto out_master;
+        }
+
+        err = ubifs_lpt_init(c, 1, !c->ro_mount);
+        if (err)
+                goto out_master;
+
+        if (!c->ro_mount && c->space_fixup) {
+                err = ubifs_fixup_free_space(c);
+                if (err)
+                        goto out_master;
+        }
+
+        if (!c->ro_mount) {
                 /*
                  * Set the "dirty" flag so that if we reboot uncleanly we
                  * will notice this immediately on the next mount.
@@ -1297,13 +1313,9 @@ static int mount_ubifs(struct ubifs_info *c)
                 c->mst_node->flags |= cpu_to_le32(UBIFS_MST_DIRTY);
                 err = ubifs_write_master(c);
                 if (err)
-                        goto out_master;
+                        goto out_lpt;
         }
 
-        err = ubifs_lpt_init(c, 1, !c->ro_mount);
-        if (err)
-                goto out_lpt;
-
         err = dbg_check_idx_size(c, c->bi.old_idx_sz);
         if (err)
                 goto out_lpt;
@@ -1396,12 +1408,6 @@ static int mount_ubifs(struct ubifs_info *c)
         } else
                 ubifs_assert(c->lst.taken_empty_lebs > 0);
 
-        if (!c->ro_mount && c->space_fixup) {
-                err = ubifs_fixup_free_space(c);
-                if (err)
-                        goto out_infos;
-        }
-
         err = dbg_check_filesystem(c);
         if (err)
                 goto out_infos;
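Because ubifs_lpt_init() and the free-space fixup now run before the master node is written, the error unwinding changes with them: once the LPT is initialized, later failures must exit through out_lpt rather than out_master. A generic sketch of this init-order/unwind-label pattern (all function names invented, not UBIFS APIs):

#include <stdio.h>

static int lpt_init(void)     { puts("lpt init");     return 0;  }
static void lpt_free(void)    { puts("lpt free");     }
static int write_master(void) { puts("write master"); return -1; } /* fail */

static int mount_fs(void)
{
        int err;

        err = lpt_init();          /* now done earlier in the sequence */
        if (err)
                goto out_master;   /* LPT not up yet: nothing extra to undo */

        err = write_master();
        if (err)
                goto out_lpt;      /* LPT is up: unwind through out_lpt */

        return 0;

out_lpt:
        lpt_free();
out_master:
        /* free master-node state here */
        return err;
}

int main(void) { return mount_fs() ? 1 : 0; }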
fs/ubifs/tnc.c

@@ -2876,12 +2876,13 @@ static void tnc_destroy_cnext(struct ubifs_info *c)
  */
 void ubifs_tnc_close(struct ubifs_info *c)
 {
-        long clean_freed;
-
         tnc_destroy_cnext(c);
         if (c->zroot.znode) {
-                clean_freed = ubifs_destroy_tnc_subtree(c->zroot.znode);
-                atomic_long_sub(clean_freed, &ubifs_clean_zn_cnt);
+                long n;
+
+                ubifs_destroy_tnc_subtree(c->zroot.znode);
+                n = atomic_long_read(&c->clean_zn_cnt);
+                atomic_long_sub(n, &ubifs_clean_zn_cnt);
         }
         kfree(c->gap_lebs);
         kfree(c->ilebs);
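This is the "fix clean znode counter corruption in error cases" change: instead of subtracting whatever ubifs_destroy_tnc_subtree() returns, which can disagree with the accounting after earlier errors, ubifs_tnc_close() now subtracts exactly what this file-system contributed, read back from c->clean_zn_cnt. A userspace sketch of the idea with C11 atomics (the struct and numbers are invented):

#include <stdatomic.h>
#include <stdio.h>

/* Toy model: a global clean-znode count shared by all mounts, and a
 * per-filesystem record of what this mount contributed. */
static atomic_long global_clean_cnt = 100;

struct fs { long clean_cnt; };

static void fs_close(struct fs *fs)
{
        /* Subtract what *this* filesystem accounted for, rather than a
         * value recomputed during teardown, which may be stale. */
        atomic_fetch_sub(&global_clean_cnt, fs->clean_cnt);
}

int main(void)
{
        struct fs fs = { .clean_cnt = 40 };

        fs_close(&fs);
        printf("global = %ld\n", atomic_load(&global_clean_cnt)); /* 60 */
        return 0;
}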
fs/ubifs/ubifs.h

@@ -722,12 +722,14 @@ struct ubifs_bud {
  * struct ubifs_jhead - journal head.
  * @wbuf: head's write-buffer
  * @buds_list: list of bud LEBs belonging to this journal head
+ * @grouped: non-zero if UBIFS groups nodes when writing to this journal head
  *
  * Note, the @buds list is protected by the @c->buds_lock.
  */
 struct ubifs_jhead {
         struct ubifs_wbuf wbuf;
         struct list_head buds_list;
+        unsigned int grouped:1;
 };
 
 /**
@@ -1742,7 +1744,7 @@ struct inode *ubifs_iget(struct super_block *sb, unsigned long inum);
 int ubifs_recover_master_node(struct ubifs_info *c);
 int ubifs_write_rcvrd_mst_node(struct ubifs_info *c);
 struct ubifs_scan_leb *ubifs_recover_leb(struct ubifs_info *c, int lnum,
-                                         int offs, void *sbuf, int grouped);
+                                         int offs, void *sbuf, int jhead);
 struct ubifs_scan_leb *ubifs_recover_log_leb(struct ubifs_info *c, int lnum,
                                              int offs, void *sbuf);
 int ubifs_recover_inl_heads(const struct ubifs_info *c, void *sbuf);