forked from luck/tmp_suning_uos_patched
ovl: generalize the lower_layers[] array
Rename lower_layers[] array to layers[], extend its size by one and
initialize layers[0] with upper layer values.  Lower layers are now
addressed with index 1..numlower.  layers[0] is reserved even with lower
only overlay.

[SzM: replace ofs->numlower with ofs->numlayer, the latter's value is
incremented by one]

Signed-off-by: Amir Goldstein <amir73il@gmail.com>
Signed-off-by: Miklos Szeredi <mszeredi@redhat.com>
This commit is contained in:
parent
b504c6540d
commit
94375f9d51
|
--- a/fs/overlayfs/export.c
+++ b/fs/overlayfs/export.c
@@ -424,7 +424,6 @@ static struct dentry *ovl_lookup_real_inode(struct super_block *sb,
 					    struct ovl_layer *layer)
 {
 	struct ovl_fs *ofs = sb->s_fs_info;
-	struct ovl_layer upper_layer = { .mnt = ofs->upper_mnt };
 	struct dentry *index = NULL;
 	struct dentry *this = NULL;
 	struct inode *inode;
@@ -466,7 +465,7 @@ static struct dentry *ovl_lookup_real_inode(struct super_block *sb,
 		 * recursive call walks back from indexed upper to the topmost
 		 * connected/hashed upper parent (or up to root).
 		 */
-		this = ovl_lookup_real(sb, upper, &upper_layer);
+		this = ovl_lookup_real(sb, upper, &ofs->layers[0]);
 		dput(upper);
 	}
 
@@ -646,8 +645,7 @@ static struct dentry *ovl_get_dentry(struct super_block *sb,
 				     struct dentry *index)
 {
 	struct ovl_fs *ofs = sb->s_fs_info;
-	struct ovl_layer upper_layer = { .mnt = ofs->upper_mnt };
-	struct ovl_layer *layer = upper ? &upper_layer : lowerpath->layer;
+	struct ovl_layer *layer = upper ? &ofs->layers[0] : lowerpath->layer;
 	struct dentry *real = upper ?: (index ?: lowerpath->dentry);
 
 	/*
--- a/fs/overlayfs/namei.c
+++ b/fs/overlayfs/namei.c
@@ -322,16 +322,16 @@ int ovl_check_origin_fh(struct ovl_fs *ofs, struct ovl_fh *fh, bool connected,
 	struct dentry *origin = NULL;
 	int i;
 
-	for (i = 0; i < ofs->numlower; i++) {
+	for (i = 1; i < ofs->numlayer; i++) {
 		/*
 		 * If lower fs uuid is not unique among lower fs we cannot match
 		 * fh->uuid to layer.
 		 */
-		if (ofs->lower_layers[i].fsid &&
-		    ofs->lower_layers[i].fs->bad_uuid)
+		if (ofs->layers[i].fsid &&
+		    ofs->layers[i].fs->bad_uuid)
 			continue;
 
-		origin = ovl_decode_real_fh(fh, ofs->lower_layers[i].mnt,
+		origin = ovl_decode_real_fh(fh, ofs->layers[i].mnt,
 					    connected);
 		if (origin)
 			break;
@@ -354,7 +354,7 @@ int ovl_check_origin_fh(struct ovl_fs *ofs, struct ovl_fh *fh, bool connected,
 	}
 	**stackp = (struct ovl_path){
 		.dentry = origin,
-		.layer = &ofs->lower_layers[i]
+		.layer = &ofs->layers[i]
 	};
 
 	return 0;
@@ -885,7 +885,7 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
 
 	if (!d.stop && poe->numlower) {
 		err = -ENOMEM;
-		stack = kcalloc(ofs->numlower, sizeof(struct ovl_path),
+		stack = kcalloc(ofs->numlayer - 1, sizeof(struct ovl_path),
 				GFP_KERNEL);
 		if (!stack)
 			goto out_put_upper;
--- a/fs/overlayfs/ovl_entry.h
+++ b/fs/overlayfs/ovl_entry.h
@@ -45,10 +45,10 @@ struct ovl_path {
 /* private information held for overlayfs's superblock */
 struct ovl_fs {
 	struct vfsmount *upper_mnt;
-	unsigned int numlower;
+	unsigned int numlayer;
 	/* Number of unique lower sb that differ from upper sb */
 	unsigned int numlowerfs;
-	struct ovl_layer *lower_layers;
+	struct ovl_layer *layers;
 	struct ovl_sb *lower_fs;
 	/* workbasedir is the path at workdir= mount option */
 	struct dentry *workbasedir;
--- a/fs/overlayfs/super.c
+++ b/fs/overlayfs/super.c
@@ -224,13 +224,13 @@ static void ovl_free_fs(struct ovl_fs *ofs)
 	if (ofs->upperdir_locked)
 		ovl_inuse_unlock(ofs->upper_mnt->mnt_root);
 	mntput(ofs->upper_mnt);
-	for (i = 0; i < ofs->numlower; i++) {
-		iput(ofs->lower_layers[i].trap);
-		mntput(ofs->lower_layers[i].mnt);
+	for (i = 1; i < ofs->numlayer; i++) {
+		iput(ofs->layers[i].trap);
+		mntput(ofs->layers[i].mnt);
 	}
 	for (i = 0; i < ofs->numlowerfs; i++)
 		free_anon_bdev(ofs->lower_fs[i].pseudo_dev);
-	kfree(ofs->lower_layers);
+	kfree(ofs->layers);
 	kfree(ofs->lower_fs);
 
 	kfree(ofs->config.lowerdir);
@@ -1319,16 +1319,16 @@ static int ovl_get_fsid(struct ovl_fs *ofs, const struct path *path)
 	return ofs->numlowerfs;
 }
 
-static int ovl_get_lower_layers(struct super_block *sb, struct ovl_fs *ofs,
-				struct path *stack, unsigned int numlower)
+static int ovl_get_layers(struct super_block *sb, struct ovl_fs *ofs,
+			  struct path *stack, unsigned int numlower)
 {
 	int err;
 	unsigned int i;
 
 	err = -ENOMEM;
-	ofs->lower_layers = kcalloc(numlower, sizeof(struct ovl_layer),
-				    GFP_KERNEL);
-	if (ofs->lower_layers == NULL)
+	ofs->layers = kcalloc(numlower + 1, sizeof(struct ovl_layer),
+			      GFP_KERNEL);
+	if (ofs->layers == NULL)
 		goto out;
 
 	ofs->lower_fs = kcalloc(numlower, sizeof(struct ovl_sb),
@@ -1336,6 +1336,12 @@ static int ovl_get_layers(struct super_block *sb, struct ovl_fs *ofs,
 	if (ofs->lower_fs == NULL)
 		goto out;
 
+	/* idx 0 is reserved for upper fs even with lower only overlay */
+	ofs->layers[0].mnt = ofs->upper_mnt;
+	ofs->layers[0].idx = 0;
+	ofs->layers[0].fsid = 0;
+	ofs->numlayer = 1;
+
 	for (i = 0; i < numlower; i++) {
 		struct vfsmount *mnt;
 		struct inode *trap;
@@ -1369,15 +1375,15 @@ static int ovl_get_layers(struct super_block *sb, struct ovl_fs *ofs,
 		 */
 		mnt->mnt_flags |= MNT_READONLY | MNT_NOATIME;
 
-		ofs->lower_layers[ofs->numlower].trap = trap;
-		ofs->lower_layers[ofs->numlower].mnt = mnt;
-		ofs->lower_layers[ofs->numlower].idx = i + 1;
-		ofs->lower_layers[ofs->numlower].fsid = fsid;
+		ofs->layers[ofs->numlayer].trap = trap;
+		ofs->layers[ofs->numlayer].mnt = mnt;
+		ofs->layers[ofs->numlayer].idx = ofs->numlayer;
+		ofs->layers[ofs->numlayer].fsid = fsid;
 		if (fsid) {
-			ofs->lower_layers[ofs->numlower].fs =
+			ofs->layers[ofs->numlayer].fs =
 				&ofs->lower_fs[fsid - 1];
 		}
-		ofs->numlower++;
+		ofs->numlayer++;
 	}
 
 	/*
@@ -1464,7 +1470,7 @@ static struct ovl_entry *ovl_get_lowerstack(struct super_block *sb,
 		goto out_err;
 	}
 
-	err = ovl_get_lower_layers(sb, ofs, stack, numlower);
+	err = ovl_get_layers(sb, ofs, stack, numlower);
 	if (err)
 		goto out_err;
 
@@ -1475,7 +1481,7 @@ static struct ovl_entry *ovl_get_lowerstack(struct super_block *sb,
 
 	for (i = 0; i < numlower; i++) {
 		oe->lowerstack[i].dentry = dget(stack[i].dentry);
-		oe->lowerstack[i].layer = &ofs->lower_layers[i];
+		oe->lowerstack[i].layer = &ofs->layers[i+1];
 	}
 
 	if (remote)
@@ -1556,9 +1562,9 @@ static int ovl_check_overlapping_layers(struct super_block *sb,
 		return err;
 	}
 
-	for (i = 0; i < ofs->numlower; i++) {
+	for (i = 1; i < ofs->numlayer; i++) {
 		err = ovl_check_layer(sb, ofs,
-				      ofs->lower_layers[i].mnt->mnt_root,
+				      ofs->layers[i].mnt->mnt_root,
 				      "lowerdir");
 		if (err)
 			return err;
Loading…
Reference in New Issue
Block a user