 user/Makefile                     |   2 +-
 user/buffer.c                     |  23 +++--
 user/filemap.c                    |   3 +-
 user/inode.c                      |   3 +-
 user/kernel/Makefile              |   3 -
 user/kernel/balloc.c              |  37 +++++---
 user/kernel/btree.c               | 117 +++++++++++++++---------
 user/kernel/commit.c              |   4 +-
 user/kernel/commit_flusher_hack.c |  13 ++-
 user/kernel/dir.c                 |  26 ++++--
 user/kernel/dleaf.c               | 182 ++++++++++++++++++++++++++------------
 user/kernel/dleaf2.c              |  85 ++++++++----------
 user/kernel/filemap.c             |  56 ++++++++----
 user/kernel/iattr.c               |  31 ++++---
 user/kernel/ileaf.c               |  93 +++++++++++++------
 user/kernel/inode.c               |  34 +++----
 user/kernel/log.c                 |  15 ++--
 user/kernel/namei.c               |   3 +-
 user/kernel/orphan.c              |  12 ++-
 user/kernel/utility.c             |   4 +-
 user/kernel/xattr.c               | 120 +++++++++++++++++--------
 user/options.c                    |   4 +-
 user/super.c                      |   5 +-
 user/tux3.c                       |  11 ++-
 user/tux3_dump.c                  |  12 ++-
 user/tux3_graph.c                 |  31 ++++---
 user/walk.c                       |   7 +-
 27 files changed, 597 insertions(+), 339 deletions(-)

diff --git a/user/Makefile b/user/Makefile
index a276dcd..6d825b6 100644
--- a/user/Makefile
+++ b/user/Makefile
@@ -12,7 +12,7 @@ ifeq ($(ARCH),i686)
 CFLAGS += -m32
 endif
 
-CFLAGS += -std=gnu99 -Wall -g -rdynamic -D_GNU_SOURCE -D_FILE_OFFSET_BITS=64
+CFLAGS += -Wall -g -rdynamic -D_GNU_SOURCE -D_FILE_OFFSET_BITS=64
 CFLAGS += -I$(TOPDIR)
 # gcc warning options
 CFLAGS += -Wall -Wextra -Werror
diff --git a/user/buffer.c b/user/buffer.c
index c8c8273..7412330 100644
--- a/user/buffer.c
+++ b/user/buffer.c
@@ -98,7 +98,9 @@ void show_buffer_list(struct list_head *list)
 
 void show_dirty_buffers(map_t *map)
 {
-	for (int i = 0; i < BUFFER_DIRTY_STATES; i++) {
+	int i;
+
+	for (i = 0; i < BUFFER_DIRTY_STATES; i++) {
 		printf("map %p dirty [%d]: ", map, i);
 		show_buffer_list(tux3_dirty_buffers(map->inode, i));
 	}
@@ -538,12 +540,13 @@ void invalidate_buffers(map_t *map)
 static void destroy_buffers(void)
 {
 	struct buffer_head *buffer, *safe;
-	struct list_head *head;
+	int i;
 
 	/* If debug_buffer, buffer should already be freed */
-	for (int i = 0; i < BUFFER_STATES; i++) {
-		head = buffers + i;
+	for (i = 0; i < BUFFER_STATES; i++) {
+		struct list_head *head = buffers + i;
+
 		if (!debug_buffer) {
 			list_for_each_entry_safe(buffer, safe, head, link) {
 				list_del(&buffer->lru);
@@ -634,9 +637,11 @@ error:
 
 void init_buffers(struct dev *dev, unsigned poolsize, int debug)
 {
+	int i;
+
 	debug_buffer = debug;
 	INIT_LIST_HEAD(&lru_buffers);
-	for (int i = 0; i < BUFFER_STATES; i++)
+	for (i = 0; i < BUFFER_STATES; i++)
 		INIT_LIST_HEAD(buffers + i);
 
 	unsigned bufsize = 1 << dev->bits;
@@ -681,19 +686,23 @@ int dev_errio(int rw, struct bufvec *bufvec)
 
 map_t *new_map(struct dev *dev, blockio_t *io)
 {
+	int i;
 	map_t *map = malloc(sizeof(*map)); // error???
+
 	*map = (map_t){ .dev = dev, .io = io ? io : dev_blockio };
-	for (int i = 0; i < BUFFER_BUCKETS; i++)
+	for (i = 0; i < BUFFER_BUCKETS; i++)
 		INIT_HLIST_HEAD(&map->hash[i]);
 	return map;
 }
 
 void free_map(map_t *map)
 {
-	for (int i = 0; i < BUFFER_BUCKETS; i++) {
+	int i;
+
+	for (i = 0; i < BUFFER_BUCKETS; i++) {
 		struct hlist_head *bucket = &map->hash[i];
 		struct buffer_head *buffer;
 		struct hlist_node *n;
diff --git a/user/filemap.c b/user/filemap.c
index 2b49c7d..fd0d1c3 100644
--- a/user/filemap.c
+++ b/user/filemap.c
@@ -124,6 +124,7 @@ static int filemap_extent_io(enum map_mode mode, int rw, struct bufvec *bufvec)
 	struct inode *inode = bufvec_inode(bufvec);
 	block_t block, index = bufvec_contig_index(bufvec);
 	int err;
+	int i;
 
 	/* FIXME: now assuming buffer is only 1 for MAP_READ */
 	assert(mode != MAP_READ || bufvec_contig_count(bufvec) == 1);
@@ -151,7 +152,7 @@ static int filemap_extent_io(enum map_mode mode, int rw, struct bufvec *bufvec)
 		return segs;
 	assert(segs);
 
-	for (int i = 0; i < segs; i++) {
+	for (i = 0; i < segs; i++) {
 		block = seg[i].block;
 		count = seg[i].count;
diff --git a/user/inode.c b/user/inode.c
index 444bbc5..dc8dda7 100644
--- a/user/inode.c
+++ b/user/inode.c
@@ -29,9 +29,10 @@ static unsigned long hash(inum_t inum)
 
 void inode_leak_check(void)
 {
+	int i;
 	int leaks = 0;
 
-	for (int i = 0; i < HASH_SIZE; i++) {
+	for (i = 0; i < HASH_SIZE; i++) {
 		struct hlist_head *head = inode_hashtable + i;
 		struct inode *inode;
 		hlist_for_each_entry(inode, head, i_hash) {
diff --git a/user/kernel/Makefile b/user/kernel/Makefile
index 93229a1..aba11fe 100644
--- a/user/kernel/Makefile
+++ b/user/kernel/Makefile
@@ -10,8 +10,5 @@ obj-$(CONFIG_TUX3) += tux3.o
 tux3-objs += balloc.o btree.o buffer.o commit.o dir.o dleaf.o dleaf2.o \
 	filemap.o iattr.o ileaf.o inode.o log.o namei.o orphan.o replay.o \
 	super.o utility.o writeback.o xattr.o
-EXTRA_CFLAGS += -Werror -std=gnu99 -Wno-declaration-after-statement
-#EXTRA_CFLAGS += -DTUX3_FLUSHER=TUX3_FLUSHER_SYNC
-#EXTRA_CFLAGS += -DTUX3_FLUSHER=TUX3_FLUSHER_ASYNC_OWN
 EXTRA_CFLAGS += -DTUX3_FLUSHER=TUX3_FLUSHER_ASYNC_HACK
 endif
diff --git a/user/kernel/balloc.c b/user/kernel/balloc.c
index cc642d0..bf5d2c1 100644
--- a/user/kernel/balloc.c
+++ b/user/kernel/balloc.c
@@ -145,11 +145,13 @@ static int countmap_used(struct sb *sb, block_t group)
 #ifndef __KERNEL__
 void countmap_dump(struct sb *sb, block_t start, block_t count)
 {
+	block_t group;
 	unsigned groupbits = sb->groupbits, groupsize = 1 << groupbits;
 
-	for (block_t group = start; group < count; group++) {
+	for (group = start; group < count; group++) {
 		block_t block = group << groupbits;
 		block_t blocks = min_t(block_t, sb->volblocks - block, groupsize);
+
 		__tux3_dbg("%Lu: %i used, ", group, countmap_used(sb, group));
 		bitmap_dump(sb->bitmap, block, blocks);
 	}
@@ -164,10 +166,12 @@ void countmap_dump(struct sb *sb, block_t start, block_t count)
 block_t count_range(struct inode *inode, block_t start, block_t count)
 {
 	unsigned char ones[256];
+	block_t block;
+	int i;
 
 	assert(!(start & 7));
 
-	for (int i = 0; i < sizeof(ones); i++)
+	for (i = 0; i < sizeof(ones); i++)
 		ones[i] = bytebits(i);
 
 	struct sb *sb = tux_sb(inode->i_sb);
@@ -178,15 +182,20 @@ block_t count_range(struct inode *inode, block_t start, block_t count)
 	block_t tail = (count + 7) >> 3, total = 0;
 	unsigned offset = (start & mapmask) >> 3;
 
-	for (block_t block = start >> mapshift; block < blocks; block++) {
+	for (block = start >> mapshift; block < blocks; block++) {
+		struct buffer_head *buffer;
+		unsigned char *p, *top;
+		unsigned bytes;
+
 		trace_off("count block %x/%x", block, blocks);
-		struct buffer_head *buffer = blockread(mapping(inode), block);
+		buffer = blockread(mapping(inode), block);
 		if (!buffer)
 			return -1;
-		unsigned bytes = sb->blocksize - offset;
+		bytes = sb->blocksize - offset;
 		if (bytes > tail)
 			bytes = tail;
-		unsigned char *p = bufdata(buffer) + offset, *top = p + bytes;
+		p = bufdata(buffer) + offset;
+		top = p + bytes;
 		while (p < top)
 			total += ones[*p++];
 		blockput(buffer);
@@ -199,6 +208,7 @@
 void bitmap_dump(struct inode *inode, block_t start, block_t count)
 {
 	enum { show_used = 0 };
+	block_t block;
 	struct sb *sb = tux_sb(inode->i_sb);
 	unsigned mapshift = sb->blockbits + 3;
 	unsigned mapsize = 1 << mapshift;
@@ -208,20 +218,27 @@ void bitmap_dump(struct inode *inode, block_t start, block_t count)
 	block_t tail = (count + bit + 7) >> 3, begin = -1;
 
 	__tux3_dbg("%s regions in %Lu/%Lu: ", show_used ? "used" : "free", start, count);
-	for (block_t block = start >> mapshift; block < blocks; block++) {
+	for (block = start >> mapshift; block < blocks; block++) {
+		unsigned bytes;
+		unsigned char *data, *p, *top;
 		struct buffer_head *buffer = blockread(mapping(inode), block);
+
 		assert(buffer);
-		unsigned bytes = sb->blocksize - offset;
+		bytes = sb->blocksize - offset;
 		if (bytes > tail)
 			bytes = tail;
-		unsigned char *data = bufdata(buffer), *p = data + offset, *top = p + bytes;
+		data = bufdata(buffer);
+		p = data + offset;
+		top = p + bytes;
 		for (; p < top; p++, bit = 0) {
+			int i, mask;
 			unsigned c = *p;
+
 			if ((!c && ((begin >= 0) ^ show_used)))
 				continue;
 			if (((c == 0xff) && ((begin < 0) ^ show_used)))
 				continue;
-			for (int i = bit, mask = 1 << bit; i < 8; i++, mask <<= 1) {
+			for (i = bit, mask = 1 << bit; i < 8; i++, mask <<= 1) {
 				if (!(c & mask) ^ (begin < 0) ^ show_used)
 					continue;
 				block_t found = i + ((p - data) << 3) + (block << mapshift);
diff --git a/user/kernel/btree.c b/user/kernel/btree.c
index 16d602a..10b11b4 100644
--- a/user/kernel/btree.c
+++ b/user/kernel/btree.c
@@ -52,12 +52,12 @@ static inline unsigned bcount(struct bnode *node)
 
 static struct buffer_head *new_block(struct btree *btree)
 {
-	block_t block;
+	struct buffer_head *buffer;
+	block_t block = balloc_one(btree->sb);
 
-	block = balloc_one(btree->sb);
 	if (block < 0)
 		return ERR_PTR(block);
-	struct buffer_head *buffer = vol_getblk(btree->sb, block);
+	buffer = vol_getblk(btree->sb, block);
 	if (!buffer)
 		return ERR_PTR(-ENOMEM); // ERR_PTR me!!! and bfree?
return buffer; @@ -197,8 +197,10 @@ void release_cursor(struct cursor *cursor) /* unused */ void show_cursor(struct cursor *cursor, int depth) { + int i; + __tux3_dbg(">>> cursor %p/%i:", cursor, depth); - for (int i = 0; i < depth; i++) { + for (i = 0; i < depth; i++) { __tux3_dbg(" [%Lx/%i]", bufindex(cursor->path[i].buffer), bufcount(cursor->path[i].buffer)); @@ -208,18 +210,25 @@ void show_cursor(struct cursor *cursor, int depth) static void cursor_check(struct cursor *cursor) { + tuxkey_t key; + block_t block; + int i; + if (cursor->level == -1) return; - tuxkey_t key = 0; - block_t block = cursor->btree->root.block; + key = 0; + block = cursor->btree->root.block; + + for (i = 0; i <= cursor->level; i++) { + struct bnode *bnode; + struct index_entry *entry; - for (int i = 0; i <= cursor->level; i++) { assert(bufindex(cursor->path[i].buffer) == block); if (i == cursor->level) break; - struct bnode *bnode = level_node(cursor, i); - struct index_entry *entry = cursor->path[i].next - 1; + bnode = level_node(cursor, i); + entry = cursor->path[i].next - 1; assert(bnode->entries <= entry); assert(entry < bnode->entries + bcount(bnode)); /* @@ -248,11 +257,13 @@ struct cursor *alloc_cursor(struct btree *btree, int extra) struct cursor *cursor = malloc(alloc_cursor_size(maxlevel + 1)); if (cursor) { + int i; + cursor->btree = btree; cursor->level = -1; #ifdef CURSOR_DEBUG cursor->maxlevel = maxlevel; - for (int i = 0; i <= maxlevel; i++) { + for (i = 0; i <= maxlevel; i++) { cursor->path[i].buffer = FREE_BUFFER; /* for debug */ cursor->path[i].next = FREE_NEXT; /* for debug */ } @@ -498,12 +509,15 @@ out: void show_tree_range(struct btree *btree, tuxkey_t start, unsigned count) { + struct cursor *cursor; + struct buffer_head *buffer; + __tux3_dbg("%i level btree at %Li:\n", btree->root.depth, btree->root.block); if (!has_root(btree)) return; - struct cursor *cursor = alloc_cursor(btree, 0); + cursor = alloc_cursor(btree, 0); if (!cursor) { tux3_err(btree->sb, "out of memory"); return; @@ -513,7 +527,6 @@ void show_tree_range(struct btree *btree, tuxkey_t start, unsigned count) goto out; } - struct buffer_head *buffer; do { buffer = cursor_leafbuf(cursor); assert((btree->ops->leaf_sniff)(btree, bufdata(buffer))); @@ -792,6 +805,7 @@ static int try_bnode_merge(struct sb *sb, struct buffer_head *intobuf, */ int btree_chop(struct btree *btree, tuxkey_t start, u64 len) { + int i; struct sb *sb = btree->sb; struct btree_ops *ops = btree->ops; struct buffer_head **prev, *leafprev = NULL; @@ -950,7 +964,7 @@ chop_root: out: if (leafprev) blockput(leafprev); - for (int i = 0; i < btree->root.depth; i++) { + for (i = 0; i < btree->root.depth; i++) { if (prev[i]) blockput(prev[i]); } @@ -1005,6 +1019,11 @@ static void bnode_split(struct bnode *src, unsigned pos, struct bnode *dst) */ static int insert_leaf(struct cursor *cursor, tuxkey_t childkey, struct buffer_head *leafbuf, int keep) { + struct buffer_head *newbuf; + struct bnode *newroot; + block_t newrootblock; + block_t oldrootblock; + int left_node; struct btree *btree = cursor->btree; struct sb *sb = btree->sb; int level = btree->root.depth; @@ -1017,6 +1036,11 @@ static int insert_leaf(struct cursor *cursor, tuxkey_t childkey, struct buffer_h cursor_push(cursor, leafbuf, NULL); } while (level--) { + struct buffer_head *newbuf; + struct bnode *newnode; + unsigned half; + u64 newkey; + int child_is_left; struct path_level *at = &cursor->path[level]; struct buffer_head *parentbuf = at->buffer; struct bnode *parent = bufdata(parentbuf); @@ -1033,19 
+1057,19 @@ static int insert_leaf(struct cursor *cursor, tuxkey_t childkey, struct buffer_h } /* split a full index node */ - struct buffer_head *newbuf = new_node(btree); + newbuf = new_node(btree); if (IS_ERR(newbuf)) return PTR_ERR(newbuf); - struct bnode *newnode = bufdata(newbuf); - unsigned half = bcount(parent) / 2; - u64 newkey = be64_to_cpu(parent->entries[half].key); + newnode = bufdata(newbuf); + half = bcount(parent) / 2; + newkey = be64_to_cpu(parent->entries[half].key); bnode_split(parent, half, newnode); log_bnode_split(sb, bufindex(parentbuf), half, bufindex(newbuf)); /* if the cursor is in the new node, use that as the parent */ - int child_is_left = at->next <= parent->entries + half; + child_is_left = at->next <= parent->entries + half; if (!child_is_left) { struct index_entry *newnext; mark_buffer_unify_non(parentbuf); @@ -1077,14 +1101,14 @@ static int insert_leaf(struct cursor *cursor, tuxkey_t childkey, struct buffer_h /* Make new root bnode */ trace("add tree level"); - struct buffer_head *newbuf = new_node(btree); + newbuf = new_node(btree); if (IS_ERR(newbuf)) return PTR_ERR(newbuf); - struct bnode *newroot = bufdata(newbuf); - block_t newrootblock = bufindex(newbuf); - block_t oldrootblock = btree->root.block; - int left_node = bufindex(cursor->path[0].buffer) != childblock; + newroot = bufdata(newbuf); + newrootblock = bufindex(newbuf); + oldrootblock = btree->root.block; + left_node = bufindex(cursor->path[0].buffer) != childblock; bnode_init_root(newroot, 2, oldrootblock, childblock, childkey); cursor_root_add(cursor, newbuf, newroot->entries + 1 + !left_node); log_bnode_root(sb, newrootblock, 2, oldrootblock, childblock, childkey); @@ -1117,18 +1141,21 @@ int btree_insert_leaf(struct cursor *cursor, tuxkey_t key, struct buffer_head *l */ static int btree_leaf_split(struct cursor *cursor, tuxkey_t key, tuxkey_t hint) { - trace("split leaf"); - struct btree *btree = cursor->btree; + struct btree *btree; struct buffer_head *newbuf; + struct buffer_head *leafbuf; + tuxkey_t newkey; + + trace("split leaf"); + btree = cursor->btree; newbuf = new_leaf(btree); if (IS_ERR(newbuf)) return PTR_ERR(newbuf); log_balloc(btree->sb, bufindex(newbuf), 1); - struct buffer_head *leafbuf = cursor_leafbuf(cursor); - tuxkey_t newkey = btree->ops->leaf_split(btree, hint, bufdata(leafbuf), - bufdata(newbuf)); + leafbuf = cursor_leafbuf(cursor); + newkey = btree->ops->leaf_split(btree, hint, bufdata(leafbuf), bufdata(newbuf)); assert(cursor_this_key(cursor) < newkey); assert(newkey < cursor_next_key(cursor)); if (key < newkey) @@ -1249,18 +1276,23 @@ void init_btree(struct btree *btree, struct sb *sb, struct root root, struct btr int alloc_empty_btree(struct btree *btree) { + struct buffer_head *leafbuf; + struct bnode *rootnode; + block_t rootblock; + block_t leafblock; struct sb *sb = btree->sb; struct buffer_head *rootbuf = new_node(btree); + if (IS_ERR(rootbuf)) goto error; - struct buffer_head *leafbuf = new_leaf(btree); + leafbuf = new_leaf(btree); if (IS_ERR(leafbuf)) goto error_leafbuf; assert(!has_root(btree)); - struct bnode *rootnode = bufdata(rootbuf); - block_t rootblock = bufindex(rootbuf); - block_t leafblock = bufindex(leafbuf); + rootnode = bufdata(rootbuf); + rootblock = bufindex(rootbuf); + leafblock = bufindex(leafbuf); trace("root at %Lx", rootblock); trace("leaf at %Lx", leafblock); bnode_init_root(rootnode, 1, leafblock, 0, 0); @@ -1288,14 +1320,19 @@ error: /* FIXME: right? and this should be done by btree_chop()? 
*/ int free_empty_btree(struct btree *btree) { + struct sb *sb; + struct buffer_head *rootbuf; + struct bnode *rootnode; + block_t leaf; + struct buffer_head *leafbuf; struct btree_ops *ops = btree->ops; if (!has_root(btree)) return 0; assert(btree->root.depth == 1); - struct sb *sb = btree->sb; - struct buffer_head *rootbuf = vol_bread(sb, btree->root.block); + sb = btree->sb; + rootbuf = vol_bread(sb, btree->root.block); if (!rootbuf) return -EIO; assert(bnode_sniff(bufdata(rootbuf))); @@ -1303,10 +1340,10 @@ int free_empty_btree(struct btree *btree) btree->root = no_root; tux3_mark_btree_dirty(btree); - struct bnode *rootnode = bufdata(rootbuf); + rootnode = bufdata(rootbuf); assert(bcount(rootnode) == 1); - block_t leaf = be64_to_cpu(rootnode->entries[0].block); - struct buffer_head *leafbuf = vol_find_get_block(sb, leaf); + leaf = be64_to_cpu(rootnode->entries[0].block); + leafbuf = vol_find_get_block(sb, leaf); if (leafbuf && !leaf_need_redirect(sb, leafbuf)) { /* @@ -1436,13 +1473,13 @@ static int replay_bnode_change(struct sb *sb, block_t bnodeblock, u64 val1, u64 val2, void (*change)(struct bnode *, u64, u64)) { - struct buffer_head *bnodebuf; + struct bnode *bnode; + struct buffer_head *bnodebuf = vol_getblk(sb, bnodeblock); - bnodebuf = vol_getblk(sb, bnodeblock); if (!bnodebuf) return -ENOMEM; /* FIXME: error code */ - struct bnode *bnode = bufdata(bnodebuf); + bnode = bufdata(bnodebuf); change(bnode, val1, val2); mark_buffer_unify_non(bnodebuf); diff --git a/user/kernel/commit.c b/user/kernel/commit.c index b64e617..12bd2c9 100644 --- a/user/kernel/commit.c +++ b/user/kernel/commit.c @@ -350,8 +350,10 @@ static int apply_defered_bfree(struct sb *sb, u64 val) static int commit_delta(struct sb *sb) { + int err; + trace("commit %i logblocks", be32_to_cpu(sb->super.logcount)); - int err = save_sb(sb); + err = save_sb(sb); if (err) return err; diff --git a/user/kernel/commit_flusher_hack.c b/user/kernel/commit_flusher_hack.c index 6e7d05a..7006e0d 100644 --- a/user/kernel/commit_flusher_hack.c +++ b/user/kernel/commit_flusher_hack.c @@ -155,6 +155,7 @@ static int tux3_has_old_data(struct bdi_writeback *wb) static long tux3_wb_check_old_data_flush(struct bdi_writeback *wb) { + struct wb_writeback_work work; /* Hack: dirty_expire_interval is not exported to module */ unsigned long expired; @@ -179,13 +180,11 @@ static long tux3_wb_check_old_data_flush(struct bdi_writeback *wb) return 1; } - struct wb_writeback_work work = { - .nr_pages = 0, - .sync_mode = WB_SYNC_NONE, - .for_kupdate = 1, - .range_cyclic = 1, - .reason = WB_REASON_PERIODIC, - }; + work.nr_pages = 0; + work.sync_mode = WB_SYNC_NONE; + work.for_kupdate = 1; + work.range_cyclic = 1; + work.reason = WB_REASON_PERIODIC; return tux3_wb_writeback(wb, &work); } diff --git a/user/kernel/dir.c b/user/kernel/dir.c index 1c0cfb3..caf88f1 100644 --- a/user/kernel/dir.c +++ b/user/kernel/dir.c @@ -126,6 +126,7 @@ void tux_update_dirent(struct inode *dir, struct buffer_head *buffer, loff_t tux_alloc_entry(struct inode *dir, const char *name, unsigned len, loff_t *size, struct buffer_head **hold) { + tux_dirent *limit; unsigned delta = tux3_get_current_delta(); struct sb *sb = tux_sb(dir->i_sb); tux_dirent *entry; @@ -141,7 +142,7 @@ loff_t tux_alloc_entry(struct inode *dir, const char *name, unsigned len, if (!buffer) return -EIO; entry = bufdata(buffer); - tux_dirent *limit = bufdata(buffer) + blocksize - reclen; + limit = bufdata(buffer) + blocksize - reclen; while (entry <= limit) { if (entry->rec_len == 0) { blockput(buffer); 
@@ -265,6 +266,7 @@ error:
 tux_dirent *tux_find_entry(struct inode *dir, const char *name, unsigned len,
 			   struct buffer_head **result, loff_t size)
 {
+	tux_dirent *entry, *limit;
 	struct sb *sb = tux_sb(dir->i_sb);
 	unsigned reclen = TUX_REC_LEN(len);
 	block_t block, blocks = size >> sb->blockbits;
@@ -272,12 +274,13 @@ tux_dirent *tux_find_entry(struct inode *dir, const char *name, unsigned len,
 
 	for (block = 0; block < blocks; block++) {
 		struct buffer_head *buffer = blockread(mapping(dir), block);
+
 		if (!buffer) {
 			err = -EIO; // need ERR_PTR for blockread!!!
 			goto error;
 		}
-		tux_dirent *entry = bufdata(buffer);
-		tux_dirent *limit = (void *)entry + sb->blocksize - reclen;
+		entry = bufdata(buffer);
+		limit = (void *)entry + sb->blocksize - reclen;
 		while (entry <= limit) {
 			if (entry->rec_len == 0) {
 				blockput(buffer);
@@ -330,14 +333,18 @@ int tux_readdir(struct file *file, void *state, filldir_t filldir)
 	assert(!(dir->i_size & sb->blockmask));
 
 	for (block = pos >> blockbits ; block < blocks; block++) {
+		void *base;
+		tux_dirent *limit, *entry;
 		struct buffer_head *buffer = blockread(mapping(dir), block);
+
 		if (!buffer)
 			return -EIO;
-		void *base = bufdata(buffer);
+		base = bufdata(buffer);
 		if (revalidate) {
 			if (offset) {
-				tux_dirent *entry = base + offset;
 				tux_dirent *p = base + (offset & sb->blockmask);
+
+				entry = base + offset;
 				while (p < entry && p->rec_len)
 					p = next_entry(p);
 				offset = (void *)p - base;
@@ -346,8 +353,8 @@ int tux_readdir(struct file *file, void *state, filldir_t filldir)
 			file->f_version = dir->i_version;
 			revalidate = 0;
 		}
-		tux_dirent *limit = base + sb->blocksize - TUX_REC_LEN(1);
-		for (tux_dirent *entry = base + offset; entry <= limit; entry = next_entry(entry)) {
+		limit = base + sb->blocksize - TUX_REC_LEN(1);
+		for (entry = base + offset; entry <= limit; entry = next_entry(entry)) {
 			if (entry->rec_len == 0) {
 				blockput(buffer);
 				tux_zero_len_error(dir, block);
@@ -433,6 +440,7 @@ int tux_delete_dirent(struct inode *dir, struct buffer_head *buffer,
 
 int tux_dir_is_empty(struct inode *dir)
 {
+	tux_dirent *entry, *limit;
 	struct sb *sb = tux_sb(dir->i_sb);
 	block_t block, blocks = dir->i_size >> sb->blockbits;
 	__be64 self = cpu_to_be64(tux_inode(dir)->inum);
@@ -443,8 +451,8 @@ int tux_dir_is_empty(struct inode *dir)
 		if (!buffer)
 			return -EIO;
 
-		tux_dirent *entry = bufdata(buffer);
-		tux_dirent *limit = bufdata(buffer) + sb->blocksize - TUX_REC_LEN(1);
+		entry = bufdata(buffer);
+		limit = bufdata(buffer) + sb->blocksize - TUX_REC_LEN(1);
 		for (; entry <= limit; entry = next_entry(entry)) {
 			if (!entry->rec_len) {
 				blockput(buffer);
diff --git a/user/kernel/dleaf.c b/user/kernel/dleaf.c
index 5b4a5ed..b5cbce7 100644
--- a/user/kernel/dleaf.c
+++ b/user/kernel/dleaf.c
@@ -110,29 +110,44 @@ static int dleaf_can_free(struct btree *btree, void *vleaf)
 
 void dleaf_dump(struct btree *btree, void *vleaf)
 {
+	struct group *group;
+	unsigned blocksize;
+	struct dleaf *leaf;
+	struct group *gdict, *gbase;
+	struct entry *edict, *entry;
+	struct diskextent *extents;
+
 	if (!tux3_trace)
 		return;
 
-	unsigned blocksize = btree->sb->blocksize;
-	struct dleaf *leaf = vleaf;
-	struct group *gdict = (void *)leaf + blocksize, *gbase = --gdict - dleaf_groups(leaf);
-	struct entry *edict = (void *)(gbase + 1), *entry = edict;
-	struct diskextent *extents = leaf->table;
+	blocksize = btree->sb->blocksize;
+	leaf = vleaf;
+	gdict = (void *)leaf + blocksize;
+	gbase = --gdict - dleaf_groups(leaf);
+	edict = (void *)(gbase + 1);
+	entry = edict;
+	extents = leaf->table;
 
 	__tux3_dbg("%i entry groups:\n", dleaf_groups(leaf));
-	for (struct group *group = gdict; group > gbase; group--) {
+	for (group = gdict; group > gbase; group--) {
+		struct entry *ebase;
+
 		__tux3_dbg(" %ti/%i:", gdict - group, group_count(group));
 		//__tux3_dbg(" [%i]", extents - leaf->table);
-		struct entry *ebase = entry - group_count(group);
+		ebase = entry - group_count(group);
 		while (entry > ebase) {
+			unsigned offset;
+			int count;
+			int i;
+
 			--entry;
-			unsigned offset = entry == edict - 1 ? 0 : entry_limit(entry + 1);
-			int count = entry_limit(entry) - offset;
+			offset = entry == edict - 1 ? 0 : entry_limit(entry + 1);
+			count = entry_limit(entry) - offset;
 			__tux3_dbg(" %Lx =>", get_index(group, entry));
 			//__tux3_dbg(" %p (%i)", entry, entry_limit(entry));
 			if (count < 0)
 				__tux3_dbg(" ");
-			else for (int i = 0; i < count; i++) {
+			else for (i = 0; i < count; i++) {
 				struct diskextent extent = extents[offset + i];
 				__tux3_dbg(" %Lx", extent_block(extent));
 				if (extent_count(extent))
@@ -149,17 +164,21 @@ void dleaf_dump(struct btree *btree, void *vleaf)
 
 static int dleaf_free2(struct dleaf *leaf, unsigned blocksize)
 {
+	struct group *group;
 	struct group *gdict = (void *)leaf + blocksize, *gstop = gdict - dleaf_groups(leaf);
 	struct entry *edict = (void *)gstop, *entry = edict;
 	struct diskextent *extents = leaf->table;
 
-	for (struct group *group = gdict; group-- > gstop;)
+	for (group = gdict; group-- > gstop;)
 		extents += entry_limit(entry -= group_count(group));
 	return (void *)entry - (void *)extents;
 }
 
 static int dleaf_check(struct dleaf *leaf, unsigned blocksize)
 {
+	struct group *group;
+	unsigned keyhi;
+	struct diskextent *exbase;
 	struct group *gdict = (void *)leaf + blocksize, *gstop = gdict - dleaf_groups(leaf);
 	struct entry *edict = (void *)gstop, *estop = edict;
 	struct diskextent *extents = leaf->table;
@@ -169,24 +188,30 @@ static int dleaf_check(struct dleaf *leaf, unsigned blocksize)
 	if (!dleaf_groups(leaf))
 		return 0;
 
-	unsigned keyhi = 0;
-	struct diskextent *exbase = leaf->table;
-	for (struct group *group = gdict - 1; group >= gstop; group--) {
+	keyhi = 0;
+	exbase = leaf->table;
+	for (group = gdict - 1; group >= gstop; group--) {
+		struct entry *entry;
+		unsigned limit, keylo;
+		struct diskextent *exstop;
+		block_t block;
+
 		assert(group_keyhi(group) >= keyhi);
 		assert(group_count(group) > 0);
 		assert(group_count(group) <= MAX_GROUP_ENTRIES);
 		keyhi = group_keyhi(group);
-		struct entry *entry = estop;
+		entry = estop;
 		estop -= group_count(group);
-		unsigned limit = 0, keylo = -1;
+		limit = 0;
+		keylo = -1;
 		while (--entry >= estop) {
 			assert((int)entry_keylo(entry) > (int)keylo);
 			assert(entry_limit(entry) > limit);
 			keylo = entry_keylo(entry);
			limit = entry_limit(entry);
 		}
-		struct diskextent *exstop = exbase + entry_limit(estop);
-		block_t block = 0;
+		exstop = exbase + entry_limit(estop);
+		block = 0;
 		while (exbase < exstop) {
 			assert(extent_block(*exbase) != block);
 			exbase++;
@@ -214,6 +239,12 @@ eek:
 
 static int dleaf_split_at(void *from, void *into, int split, unsigned blocksize)
 {
+	struct group *group;
+	unsigned cut;
+	unsigned size;
+	struct group *gdict2;
+	struct entry *edict2;
+	unsigned encopy;
 	struct dleaf *leaf = from, *leaf2 = into;
 	unsigned groups = dleaf_groups(leaf), groups2;
 	struct group *gdict = from + blocksize, *gbase = gdict - groups;
@@ -225,7 +256,7 @@ static int dleaf_split_at(void *from, void *into, int split, unsigned blocksize)
 	if (!groups)
 		return 0;
 	assert(split < entries);
-	for (struct group *group = gdict - 1; group >= gbase; group--, grsplit++) {
+	for (group = gdict - 1; group >= gbase; group--, grsplit++) {
 		if (recount + group_count(group) > split)
 			break;
 		edict -= group_count(group);
 	}
 
 	/* have to split a group? */
-	unsigned cut = split - recount;
+	cut = split - recount;
 	if (cut)
 		exsplit += entry_limit(edict - cut);
 	edict = (void *)gbase; /* restore it */
 	trace("split %i entries at group %i, entry %x", entries, grsplit, cut);
 	trace("split extents at %i", exsplit);
 
 	/* copy extents */
-	unsigned size = from + be16_to_cpu(leaf->free) - (void *)(leaf->table + exsplit);
+	size = from + be16_to_cpu(leaf->free) - (void *)(leaf->table + exsplit);
 	memcpy(leaf2->table, leaf->table + exsplit, size);
 
 	/* copy groups */
-	struct group *gdict2 = (void *)leaf2 + blocksize;
+	gdict2 = (void *)leaf2 + blocksize;
 	set_dleaf_groups(leaf2, groups2 = (groups - grsplit));
 	veccopy(gdict2 - dleaf_groups(leaf2), gbase, dleaf_groups(leaf2));
 	inc_group_count(gdict2 - 1, -cut);
@@ -255,15 +286,18 @@ static int dleaf_split_at(void *from, void *into, int split, unsigned blocksize)
 
 	/* copy entries */
-	struct entry *edict2 = (void *)(gdict2 - groups2);
+	edict2 = (void *)(gdict2 - groups2);
 	assert((struct entry *)((void *)leaf + be16_to_cpu(leaf->used)) == edict - entries);
-	unsigned encopy = entries - split;
+	encopy = entries - split;
 	veccopy(edict2 - encopy, ebase, encopy);
-	if (cut)
-		for (int i = 1; i <= group_count((gdict2 - 1)); i++)
+	if (cut) {
+		int i;
+
+		for (i = 1; i <= group_count((gdict2 - 1)); i++)
 			inc_entry_limit(edict2 - i, -entry_limit(edict - split));
+	}
 	vecmove(gdict - groups - split, edict - split, split);
 
 	/* clean up */
@@ -283,16 +317,26 @@
 */
 static tuxkey_t dleaf_split(struct btree *btree, tuxkey_t hint, void *from, void *into)
 {
+	unsigned blocksize;
+	struct group *gdict, *gbase;
+	struct entry *edict;
+	struct entry *ebase;
+	unsigned entries;
+	unsigned groups2;
+	struct group *gdict2;
 	struct dleaf *leaf = from, *leaf2 = into;
+
 	assert(dleaf_sniff(btree, from));
-	unsigned blocksize = btree->sb->blocksize;
-	struct group *gdict = from + blocksize, *gbase = gdict - dleaf_groups(leaf);
-	struct entry *edict = (void *)gbase;
-	struct entry *ebase = (void *)leaf + be16_to_cpu(leaf->used);
-	unsigned entries = edict - ebase;
+	blocksize = btree->sb->blocksize;
+	gdict = from + blocksize;
+	gbase = gdict - dleaf_groups(leaf);
+	edict = (void *)gbase;
+	ebase = (void *)leaf + be16_to_cpu(leaf->used);
+	entries = edict - ebase;
+
 	assert(entries >= 2);
-	unsigned groups2 = dleaf_split_at(from, into, entries / 2, blocksize);
-	struct group *gdict2 = (void *)leaf2 + blocksize;
+	groups2 = dleaf_split_at(from, into, entries / 2, blocksize);
+	gdict2 = (void *)leaf2 + blocksize;
 	return get_index(gdict2 - 1, (struct entry *)(gdict2 - groups2) - 1);
 }
@@ -306,6 +350,9 @@
 */
 int dleaf_merge(struct btree *btree, void *vinto, void *vfrom)
 {
+	unsigned size;
+	unsigned addgroups;
+	struct entry *ebase2, *ebase;
 	struct dleaf *leaf = vinto, *from = vfrom;
 	struct group *gdict = (void *)leaf + btree->sb->blocksize;
 	struct group *gstop = gdict - dleaf_groups(leaf);
@@ -355,15 +402,15 @@ int dleaf_merge(struct btree *btree, void *vinto, void *vfrom)
 	}
 
 	/* append extents */
-	unsigned size = be16_to_cpu(from->free) - sizeof(struct dleaf);
+	size = be16_to_cpu(from->free) - sizeof(struct dleaf);
 	memcpy((void *)leaf + free, from->table, size);
 	leaf->free = cpu_to_be16(free + size);
 
 	/* make space and append groups except for possibly merged group */
 	assert(sizeof(struct group) == sizeof(struct entry));
-	unsigned addgroups = dleaf_groups(from) - can_merge_group;
-	struct entry *ebase2 = (void *)from + be16_to_cpu(from->used);
-	struct entry *ebase = (void *)leaf + be16_to_cpu(leaf->used);
+	addgroups = dleaf_groups(from) - can_merge_group;
+	ebase2 = (void *)from + be16_to_cpu(from->used);
+	ebase = (void *)leaf + be16_to_cpu(leaf->used);
 	vecmove(ebase - addgroups, ebase, edict - ebase);
 	veccopy(gstop - addgroups, gstop2, addgroups);
 	ebase -= addgroups;
@@ -475,6 +522,8 @@ unsigned dwalk_count(struct dwalk *walk)
 /* unused */
 void dwalk_dump(struct dwalk *walk)
 {
+	struct diskextent *entry_exbase;
+
 	if (walk->leaf->table == walk->exstop) {
 		trace_on("empty leaf");
 		return;
@@ -483,7 +532,6 @@ void dwalk_dump(struct dwalk *walk)
 		trace_on("end of extent");
 		return;
 	}
-	struct diskextent *entry_exbase;
 	if (walk->entry + 1 == walk->estop + group_count(walk->group))
 		entry_exbase = walk->exbase;
 	else
@@ -549,10 +597,11 @@ int dwalk_next(struct dwalk *walk)
 /* Back to the previous extent. (i.e. rewind the previous dwalk_next()) */
 int dwalk_back(struct dwalk *walk)
 {
+	struct diskextent *entry_exbase;
+
 	/* first extent of this dleaf, or empty dleaf */
 	if (dwalk_first(walk))
 		return 0;
 
-	struct diskextent *entry_exbase;
 	if (walk->entry + 1 == walk->estop + group_count(walk->group))
 		entry_exbase = walk->exbase;
@@ -581,8 +630,11 @@
 */
 int dwalk_probe(struct dleaf *leaf, unsigned blocksize, struct dwalk *walk, tuxkey_t key)
 {
+	unsigned keylo, keyhi;
+
 	trace("probe for 0x%Lx", key);
-	unsigned keylo = key & 0xffffff, keyhi = key >> 24;
+	keylo = key & 0xffffff;
+	keyhi = key >> 24;
 
 	walk->leaf = leaf;
 	walk->gdict = (void *)leaf + blocksize;
@@ -655,8 +707,10 @@ probe_entry:
 int dwalk_mock(struct dwalk *walk, tuxkey_t index, struct diskextent extent)
 {
 	if (!dleaf_groups(walk->leaf) || walk->entry == walk->estop || dwalk_index(walk) != index) {
+		unsigned keylo, keyhi;
 		trace("add entry 0x%Lx", index);
-		unsigned keylo = index & 0xffffff, keyhi = index >> 24;
+		keylo = index & 0xffffff;
+		keyhi = index >> 24;
 		if (!walk->mock.groups || group_keyhi(&walk->mock.group) != keyhi || group_count(&walk->mock.group) >= MAX_GROUP_ENTRIES) {
 			trace("add group %i", walk->mock.groups);
 			walk->exbase += entry_limit(&walk->mock.entry);
@@ -678,6 +732,15 @@ int dwalk_mock(struct dwalk *walk, tuxkey_t index, struct diskextent extent)
 
 /* This copy extents >= this extent to another dleaf. */
 void dwalk_copy(struct dwalk *walk, struct dleaf *dest)
 {
+	struct group *gdict2;
+	unsigned groups2;
+	struct entry *ebase;
+	unsigned entries;
+	struct entry *edict2;
+	struct diskextent *exend, *entry_exbase;
+	unsigned limit_adjust, extents;
+	unsigned gcount2;
+	struct entry *entry2, *estop2;
 	struct dleaf *leaf = walk->leaf;
 	unsigned blocksize = (void *)walk->gdict - (void *)leaf;
@@ -689,14 +752,12 @@ void dwalk_copy(struct dwalk *walk, struct dleaf *dest)
 		return;
 	}
 
-	struct group *gdict2 = (void *)dest + blocksize;
-	unsigned groups2 = walk->group + 1 - walk->gstop;
-	struct entry *ebase = (void *)leaf + be16_to_cpu(leaf->used);
-	unsigned entries = (walk->entry + 1) - ebase;
-	struct entry *edict2 = (struct entry *)(gdict2 - groups2);
-	struct diskextent *exend = (void *)leaf + be16_to_cpu(leaf->free);
-	struct diskextent *entry_exbase;
-	unsigned limit_adjust, extents;
+	gdict2 = (void *)dest + blocksize;
+	groups2 = walk->group + 1 - walk->gstop;
+	ebase = (void *)leaf + be16_to_cpu(leaf->used);
+	entries = (walk->entry + 1) - ebase;
+	edict2 = (struct entry *)(gdict2 - groups2);
+	exend = (void *)leaf + be16_to_cpu(leaf->free);
 
 	if (walk->entry + 1 == walk->estop + group_count(walk->group)) {
 		entry_exbase = walk->exbase;
@@ -711,12 +772,13 @@ void dwalk_copy(struct dwalk *walk, struct dleaf *dest)
 	veccopy(edict2 - entries, ebase, entries);
 	veccopy(dest->table, entry_exbase, extents);
 
-	unsigned gcount2 = (walk->entry + 1) - walk->estop;
+	gcount2 = (walk->entry + 1) - walk->estop;
 	set_dleaf_groups(dest, groups2);
 	dest->free = cpu_to_be16((void *)(dest->table + extents) - (void *)dest);
 	dest->used = cpu_to_be16((void *)(edict2 - entries) - (void *)dest);
 	set_group_count(gdict2 - 1, gcount2);
-	struct entry *entry2 = edict2 - 1, *estop2 = edict2 - gcount2;
+	entry2 = edict2 - 1;
+	estop2 = edict2 - gcount2;
 	while (entry2 >= estop2) {
 		inc_entry_limit(entry2, -limit_adjust);
 		entry2--;
@@ -727,10 +789,14 @@
 /* This removes extents >= this extent. (cursor position is dwalk_end()). */
 void dwalk_chop(struct dwalk *walk)
 {
+	struct dleaf *leaf;
+	struct entry *ebase;
+	void *entry;
+
 	trace(" ");
 	if (dwalk_end(walk))
 		return;
-	struct dleaf *leaf = walk->leaf;
+	leaf = walk->leaf;
 
 	if (dwalk_first(walk)) {
 		unsigned blocksize = (void *)walk->gdict - (void *)leaf;
@@ -745,8 +811,8 @@ void dwalk_chop(struct dwalk *walk)
 
 	/* If extent is first extent on this group, remove this group too */
 	dwalk_back(walk);
-	struct entry *ebase = walk->estop + group_count(walk->group);
-	void *entry = walk->entry;
+	ebase = walk->estop + group_count(walk->group);
+	entry = walk->entry;
 	set_dleaf_groups(leaf, walk->gdict - walk->group);
 	set_group_count(walk->group, ebase - walk->entry);
 	entry += (void *)walk->group - (void *)walk->gstop;
@@ -779,8 +845,11 @@ int dwalk_add(struct dwalk *walk, tuxkey_t index, struct diskextent extent)
 	trace("group %ti/%i", walk->gstop + groups - 1 - walk->group, groups);
 
 	if (!groups || dwalk_index(walk) != index) {
+		unsigned keylo, keyhi;
+
 		trace("add entry 0x%Lx", index);
-		unsigned keylo = index & 0xffffff, keyhi = index >> 24;
+		keylo = index & 0xffffff;
+		keyhi = index >> 24;
 		if (!groups || group_keyhi(walk->group) != keyhi || group_count(walk->group) >= MAX_GROUP_ENTRIES) {
 			trace("add group %i", groups);
 			/* will it fit? */
@@ -838,6 +907,7 @@ static void dwalk_update(struct dwalk *walk, struct diskextent extent)
 */
 static int dleaf_chop(struct btree *btree, tuxkey_t start, u64 len, void *vleaf)
 {
+	struct dwalk rewind;
 	struct sb *sb = btree->sb;
 	struct dleaf *leaf = vleaf;
 	struct dwalk walk;
@@ -860,7 +930,7 @@ static int dleaf_chop(struct btree *btree, tuxkey_t start, u64 len, void *vleaf)
 		if (!dwalk_next(&walk))
 			goto out;
 	}
-	struct dwalk rewind = walk;
+	rewind = walk;
 	do {
 		defer_bfree(sb, &sb->defree, dwalk_block(&walk), dwalk_count(&walk));
 		log_bfree(sb, dwalk_block(&walk), dwalk_count(&walk));
diff --git a/user/kernel/dleaf2.c b/user/kernel/dleaf2.c
index ac6d6cb..6e12272 100644
--- a/user/kernel/dleaf2.c
+++ b/user/kernel/dleaf2.c
@@ -113,17 +113,15 @@ static int dleaf2_init(struct btree *btree, void *leaf)
 
 static int dleaf2_sniff(struct btree *btree, void *leaf)
 {
+	struct extent ex;
 	struct dleaf2 *dleaf = leaf;
-	if (dleaf->magic != cpu_to_be16(TUX3_MAGIC_DLEAF2))
-		return 1;
-	if (!dleaf->count)
+
+	if (dleaf->magic != cpu_to_be16(TUX3_MAGIC_DLEAF2) ||
+	    !dleaf->count)
 		return 1;
 	/* Last should be sentinel */
-	struct extent ex;
 	get_extent(dleaf->table + be16_to_cpu(dleaf->count) - 1, &ex);
-	if (ex.physical == 0)
-		return 1;
-	return 0;
+	return ex.physical == 0;
 }
 
 static int dleaf2_can_free(struct btree *btree, void *leaf)
@@ -132,25 +130,24 @@ static int dleaf2_can_free(struct btree *btree, void *leaf)
 	unsigned count = be16_to_cpu(dleaf->count);
 
 	assert(dleaf2_sniff(btree, dleaf));
-	if (count > 1)
-		return 0;
-	return 1;
+	return count <= 1;
 }
 
 static void __dleaf2_dump(struct btree *btree, struct dleaf2 *dleaf, const char *prefix)
 {
-	if (!tux3_trace)
-		return;
+	if (tux3_trace) {
+		unsigned i;
 
-	unsigned i;
-	__tux3_dbg("%sdleaf %p, magic %x, count %u\n", prefix,
-		   dleaf, be16_to_cpu(dleaf->magic), be16_to_cpu(dleaf->count));
-	for (i = 0; i < be16_to_cpu(dleaf->count); i++) {
-		struct extent ex;
-		get_extent(dleaf->table + i, &ex);
-		__tux3_dbg(" logical %Lu, physical %Lu, version %u\n",
-			   ex.logical, ex.physical, ex.version);
+		__tux3_dbg("%sdleaf %p, magic %x, count %u\n", prefix,
+			   dleaf, be16_to_cpu(dleaf->magic), be16_to_cpu(dleaf->count));
+		for (i = 0; i < be16_to_cpu(dleaf->count); i++) {
+			struct extent ex;
+
+			get_extent(dleaf->table + i, &ex);
+			__tux3_dbg(" logical %Lu, physical %Lu, version %u\n",
+				   ex.logical, ex.physical, ex.version);
+		}
 	}
 }
@@ -170,20 +167,20 @@ __dleaf2_lookup_index(struct btree *btree, struct dleaf2 *dleaf,
 	/* Paranoia check: last should be sentinel (hole) */
 	if (dleaf->count) {
 		struct extent ex;
+
 		get_extent(dleaf->table + be16_to_cpu(dleaf->count) - 1, &ex);
 		assert(ex.physical == 0);
 	}
 #endif
 
 	/* FIXME: binsearch here */
-	while (start < limit) {
+	for (; start < limit; start++) {
 		if (index == get_logical(start))
-			return start;
+			break;
 		else if (index < get_logical(start)) {
 			/* should have diskextent2 of bottom logical on leaf */
 			assert(dleaf->table < start);
 			return start - 1;
 		}
-		start++;
 	}
 
 	return start;
@@ -338,6 +335,7 @@ static int dleaf2_chop(struct btree *btree, tuxkey_t start, u64 len, void *leaf)
 	if (dex > dleaf->table) {
 		/* If previous is hole, use it as sentinel */
 		struct extent prev;
+
 		get_extent(dex - 1, &prev);
 		if (prev.physical == 0) {
 			dex--;
@@ -358,9 +356,8 @@ static int dleaf2_chop(struct btree *btree, tuxkey_t start, u64 len, void *leaf)
 	dleaf->count = cpu_to_be16((dex - dleaf->table) + 1 + need_sentinel);
 
 	block = ex.physical + (start - ex.logical);
-	dex++;
-	while (dex < dex_limit) {
+	for (dex++; dex < dex_limit; dex++) {
 		unsigned count;
 
 		/* Get next diskextent2 */
@@ -378,7 +375,6 @@ static int dleaf2_chop(struct btree *btree, tuxkey_t start, u64 len, void *leaf)
 		}
 		start = ex.logical;
 		block = ex.physical;
-		dex++;
 	}
 
 	return 1;
@@ -466,9 +462,8 @@ static int dleaf2_read(struct btree *btree, tuxkey_t key_bottom, void *leaf,
 			struct btree_key_range *key)
 {
 	struct dleaf2 *dleaf = leaf;
-	unsigned len;
+	unsigned len = __dleaf2_read(btree, key_bottom, key_limit, dleaf, key, 0);
 
-	len = __dleaf2_read(btree, key_bottom, key_limit, dleaf, key, 0);
 	key->start += len;
 	key->len -= len;
@@ -487,18 +482,14 @@ static int dleaf2_pre_write(struct btree *btree, tuxkey_t key_bottom,
 	 * hole, allocate segment.
 	 */
 	if (rq->overwrite) {
-		unsigned len;
-		int last, hole_len;
-
-		len = __dleaf2_read(btree, key_bottom, key_limit, dleaf, key,1);
-		last = rq->seg_cnt;
+		int hole_len;
+		unsigned len = __dleaf2_read(btree, key_bottom, key_limit, dleaf, key,1);
+		int last = rq->seg_cnt;
 
 		/* Remove hole from seg[] */
-		hole_len = 0;
-		while (last > rq->seg_idx && !rq->seg[last - 1].block) {
+		for (hole_len = 0; last > rq->seg_idx && !rq->seg[last - 1].block; last--) {
 			len -= rq->seg[last - 1].count;
 			hole_len += rq->seg[last - 1].count;
-			last--;
 		}
 		key->start += len;
 		key->len = hole_len;
@@ -518,13 +509,12 @@
 static void dleaf2_resize(struct dleaf2 *dleaf, struct diskextent2 *head,
 			  int diff)
 {
-	void *limit = dleaf->table + be16_to_cpu(dleaf->count);
-
-	if (diff == 0)
-		return;
+	if (diff != 0) {
+		void *limit = dleaf->table + be16_to_cpu(dleaf->count);
 
-	memmove(head + diff, head, limit - (void *)head);
-	be16_add_cpu(&dleaf->count, diff);
+		memmove(head + diff, head, limit - (void *)head);
+		be16_add_cpu(&dleaf->count, diff);
+	}
 }
 
 /* Initialize sentinel by bottom key */
@@ -541,6 +531,7 @@ static inline void dleaf2_init_sentinel(struct sb *sb, struct dleaf2 *dleaf,
 static tuxkey_t dleaf2_split_at_center(struct dleaf2 *dleaf)
 {
 	struct extent ex;
+
 	get_extent(dleaf->table + be16_to_cpu(dleaf->count) / 2, &ex);
 	return ex.logical;
 }
@@ -566,9 +557,7 @@ struct dex_info {
 static void find_start_dex(struct btree *btree, struct dleaf2 *dleaf,
 			   block_t key_start, struct dex_info *info)
 {
-	struct diskextent2 *dex_limit;
-
-	dex_limit = dleaf->table + be16_to_cpu(dleaf->count);
+	struct diskextent2 *dex_limit = dleaf->table + be16_to_cpu(dleaf->count);
 
 	info->start_block = 0;
 	info->start_count = 0;
@@ -596,10 +585,9 @@ static void find_end_dex(struct btree *btree, struct dleaf2 *dleaf,
 			 block_t key_end, struct dex_info *info)
 {
-	struct diskextent2 *limit, *dex_limit;
+	struct diskextent2 *limit;
 	u16 dleaf_count = be16_to_cpu(dleaf->count);
-
-	dex_limit = dleaf->table + dleaf_count;
+	struct diskextent2 *dex_limit = dleaf->table + dleaf_count;
 
 	info->need_sentinel = 0;
 	info->end_block = 0;
@@ -745,6 +733,7 @@ static int dleaf2_write(struct btree *btree, tuxkey_t key_bottom,
 		free_len = seg_len;
 	if (info.start_count) {
 		unsigned count = min_t(block_t, free_len, info.start_count);
+
 		if (info.start_block)
 			rq->seg_free(btree, info.start_block, count);
 		free_len -= count;
diff --git a/user/kernel/filemap.c b/user/kernel/filemap.c
index db60c79..cf766a6 100644
--- a/user/kernel/filemap.c
+++ b/user/kernel/filemap.c
@@ -64,8 +64,10 @@ enum map_mode {
 /* userland only */
 void show_segs(struct block_segment seg[], unsigned segs)
 {
+	int i;
+
 	__tux3_dbg("%i segs: ", segs);
-	for (int i = 0; i < segs; i++)
+	for (i = 0; i < segs; i++)
 		__tux3_dbg("%Lx/%i ", seg[i].block, seg[i].count);
 	__tux3_dbg("\n");
 }
@@ -94,6 +96,16 @@ static int map_region1(struct inode *inode, block_t start, unsigned count,
 			struct block_segment seg[], unsigned seg_max,
 			enum map_mode mode)
 {
+	struct dwalk *walk;
+	struct dleaf *leaf;
+	block_t limit;
+	block_t index, seg_start, block;
+	struct dwalk headwalk;
+	unsigned below, above;
+	block_t below_block, above_block;
+	tuxkey_t tailkey;
+	int i;
+	struct dleaf *tail;
 	struct sb *sb = tux_sb(inode->i_sb);
 	struct btree *btree = &tux_inode(inode)->btree;
 	struct cursor *cursor = NULL;
@@ -140,8 +152,8 @@ static int map_region1(struct inode *inode, block_t start, unsigned count,
 	}
 
 	/* dwalk_end(walk) is true with this. */
-	struct dwalk *walk = &(struct dwalk){ };
-	struct dleaf *leaf = NULL;
+	walk = &(struct dwalk){};
+	leaf = NULL;
 	if (has_root(btree)) {
 		cursor = alloc_cursor(btree, 1); /* allows for depth increase */
 		if (!cursor) {
@@ -161,7 +173,7 @@ static int map_region1(struct inode *inode, block_t start, unsigned count,
 		/* btree doesn't have root yet */
 	}
 
-	block_t limit = start + count;
+	limit = start + count;
 	if (cursor) {
 		assert(start >= cursor_this_key(cursor));
 		/* do not overlap next leaf */
@@ -170,8 +182,8 @@ static int map_region1(struct inode *inode, block_t start, unsigned count,
 	}
 	trace("--- index %Lx, limit %Lx ---", start, limit);
 
-	block_t index = start, seg_start, block;
-	struct dwalk headwalk = *walk;
+	index = start;
+	headwalk = *walk;
 	if (!dwalk_end(walk) && dwalk_index(walk) < start)
 		seg_start = dwalk_index(walk);
 	else
@@ -184,9 +196,11 @@ static int map_region1(struct inode *inode, block_t start, unsigned count,
 			ex_index = limit;
 
 		if (index < ex_index) {
+			unsigned gap;
+
 			/* There is hole */
 			ex_index = min(ex_index, limit);
-			unsigned gap = ex_index - index;
+			gap = ex_index - index;
 			index = ex_index;
 			seg[segs++] = (struct block_segment){
 				.count = gap,
@@ -205,7 +219,8 @@ static int map_region1(struct inode *inode, block_t start, unsigned count,
 		}
 	}
 	assert(segs);
-	unsigned below = start - seg_start, above = index - min(index, limit);
+	below = start - seg_start;
+	above = index - min(index, limit);
 	seg[0].block += below;
 	seg[0].count -= below;
 	seg[segs - 1].count -= above;
@@ -214,13 +229,12 @@ static int map_region1(struct inode *inode, block_t start, unsigned count,
 		goto out_release;
 
 	/* Save blocks before change seg[] for below or above. */
-	block_t below_block, above_block;
 	below_block = seg[0].block - below;
 	above_block = seg[segs - 1].block + seg[segs - 1].count;
 	if (mode == MAP_REDIRECT) {
 		/* Change the seg[] to redirect this region as one extent */
 		count = 0;
-		for (int i = 0; i < segs; i++) {
+		for (i = 0; i < segs; i++) {
 			/* Logging overwritten extents as free */
 			if (seg[i].state != BLOCK_SEG_HOLE)
 				map_bfree(inode, seg[i].block, seg[i].count);
@@ -231,7 +245,7 @@ static int map_region1(struct inode *inode, block_t start, unsigned count,
 		seg[0].count = count;
 		seg[0].state = BLOCK_SEG_HOLE;
 	}
-	for (int i = 0; i < segs; i++) {
+	for (i = 0; i < segs; i++) {
 		int newsegs;
 		if (seg[i].state != BLOCK_SEG_HOLE)
 			continue;
@@ -277,8 +291,8 @@ static int map_region1(struct inode *inode, block_t start, unsigned count,
 		dwalk_redirect(&headwalk, leaf, bufdata(cursor_leafbuf(cursor)));
 	leaf = bufdata(cursor_leafbuf(cursor));
 
-	struct dleaf *tail = NULL;
-	tuxkey_t tailkey = 0; // probably can just use limit instead
+	tail = NULL;
+	tailkey = 0; // probably can just use limit instead
 	if (!dwalk_end(walk)) {
 		tail = malloc(sb->blocksize); // error???
dleaf_init(btree, tail); @@ -288,10 +302,12 @@ static int map_region1(struct inode *inode, block_t start, unsigned count, /* Go back to region start and pack in new seg */ dwalk_chop(&headwalk); index = start; - for (int i = -!!below; i < segs + !!above; i++) { + for (i = -!!below; i < segs + !!above; i++) { if (dleaf_free(btree, leaf) < DLEAF_MAX_EXTENT_SIZE) { + struct buffer_head *newbuf; + mark_buffer_dirty_non(cursor_leafbuf(cursor)); - struct buffer_head *newbuf = new_leaf(btree); + newbuf = new_leaf(btree); if (IS_ERR(newbuf)) { segs = PTR_ERR(newbuf); goto out_create; @@ -327,10 +343,12 @@ static int map_region1(struct inode *inode, block_t start, unsigned count, } if (tail) { if (!dleaf_merge(btree, leaf, tail)) { + struct buffer_head *newbuf; + mark_buffer_dirty_non(cursor_leafbuf(cursor)); assert(dleaf_groups(tail) >= 1); /* Tail does not fit, add it as a new btree leaf */ - struct buffer_head *newbuf = new_leaf(btree); + newbuf = new_leaf(btree); if (IS_ERR(newbuf)) { segs = PTR_ERR(newbuf); goto out_create; @@ -614,6 +632,8 @@ int tux3_filemap_redirect_io(int rw, struct bufvec *bufvec) static int filemap_extent_io(enum map_mode mode, int rw, struct bufvec *bufvec) { + int segs; + int i; struct inode *inode = bufvec_inode(bufvec); block_t block, index = bufvec_contig_index(bufvec); unsigned count = bufvec_contig_count(bufvec); @@ -623,12 +643,12 @@ static int filemap_extent_io(enum map_mode mode, int rw, struct bufvec *bufvec) /* FIXME: For now, this is only for write */ assert(mode != MAP_READ); - int segs = map_region(inode, index, count, seg, ARRAY_SIZE(seg), mode); + segs = map_region(inode, index, count, seg, ARRAY_SIZE(seg), mode); if (segs < 0) return segs; assert(segs); - for (int i = 0; i < segs; i++) { + for (i = 0; i < segs; i++) { block = seg[i].block; count = seg[i].count; diff --git a/user/kernel/iattr.c b/user/kernel/iattr.c index 1a87781..e8a6377 100644 --- a/user/kernel/iattr.c +++ b/user/kernel/iattr.c @@ -70,9 +70,10 @@ static inline fixed32 tuxtime(const struct timespec ts) static unsigned encode_asize(unsigned bits) { + int kind; unsigned need = 0; - for (int kind = 0; kind < VAR_ATTRS; kind++) + for (kind = 0; kind < VAR_ATTRS; kind++) if ((bits & (1 << kind))) need += atsize[kind] + 2; return need; @@ -84,10 +85,11 @@ int attr_check(void *attrs, unsigned size) void *limit = attrs + size; unsigned head; - while (attrs < limit - 1) - { + while (attrs < limit - 1) { + unsigned kind; + attrs = decode16(attrs, &head); - unsigned kind = head >> 12; + kind = head >> 12; if (kind >= MAX_ATTRS) return 0; if (attrs + atsize[kind] > limit) @@ -99,10 +101,11 @@ int attr_check(void *attrs, unsigned size) void dump_attrs(struct inode *inode) { + int kind; //tux3_dbg("present = %x", inode->present); struct tux3_inode *tuxnode = tux_inode(inode); - for (int kind = 0; kind < MAX_ATTRS; kind++) { + for (kind = 0; kind < MAX_ATTRS; kind++) { if (!(tux_inode(inode)->present & (1 << kind))) continue; switch (kind) { @@ -142,13 +145,14 @@ void *encode_kind(void *attrs, unsigned kind, unsigned version) static void *encode_attrs(struct btree *btree, void *data, void *attrs, unsigned size) { + int kind; struct sb *sb = btree->sb; struct iattr_req_data *iattr_data = data; struct tux3_iattr_data *idata = iattr_data->idata; struct btree *attr_btree = iattr_data->btree; void *limit = attrs + size - 3; - for (int kind = 0; kind < VAR_ATTRS; kind++) { + for (kind = 0; kind < VAR_ATTRS; kind++) { if (!(idata->present & (1 << kind))) continue; if (attrs >= limit) @@ -193,14 +197,19 @@ 
void *decode_kind(void *attrs, unsigned *kind, unsigned *version) static void *decode_attrs(struct inode *inode, void *attrs, unsigned size) { - trace_off("decode %u attr bytes", size); - struct sb *sb = tux_sb(inode->i_sb); - struct tux3_inode *tuxnode = tux_inode(inode); - struct root btree_root = no_root; - void *limit = attrs + size; + struct sb *sb; + struct tux3_inode *tuxnode; + struct root btree_root; + void *limit; u64 v64; u32 v32; + trace_off("decode %u attr bytes", size); + sb = tux_sb(inode->i_sb); + tuxnode = tux_inode(inode); + btree_root = no_root; + limit = attrs + size; + while (attrs < limit - 1) { unsigned version, kind; attrs = decode_kind(attrs, &kind, &version); diff --git a/user/kernel/ileaf.c b/user/kernel/ileaf.c index c12d4de..e244600 100644 --- a/user/kernel/ileaf.c +++ b/user/kernel/ileaf.c @@ -105,19 +105,26 @@ static int ileaf_can_free(struct btree *btree, void *leaf) static void ileaf_dump(struct btree *btree, void *vleaf) { + int i; + struct ileaf_attr_ops *attr_ops; + struct ileaf *leaf; + inum_t inum; + __be16 *dict; + unsigned offset; + if (!tux3_trace) return; - struct ileaf_attr_ops *attr_ops = btree->ops->private_ops; - struct ileaf *leaf = vleaf; - inum_t inum = ibase(leaf); - __be16 *dict = ileaf_dict(btree, leaf); - unsigned offset = 0; + attr_ops = btree->ops->private_ops; + leaf = vleaf; + inum = ibase(leaf); + dict = ileaf_dict(btree, leaf); + offset = 0; __tux3_dbg("ileaf 0x%Lx/%i (%x bytes free)", ibase(leaf), icount(leaf), ileaf_free(btree, leaf)); - for (int i = 0; i < icount(leaf); i++, inum++) { + for (i = 0; i < icount(leaf); i++, inum++) { int limit = __atdict(dict, i + 1), size = limit - offset; if (!size) continue; @@ -142,9 +149,13 @@ static void ileaf_dump(struct btree *btree, void *vleaf) void *ileaf_lookup(struct btree *btree, inum_t inum, struct ileaf *leaf, unsigned *result) { + tuxkey_t at, size; + void *attrs; + assert(inum >= ibase(leaf)); - tuxkey_t at = inum - ibase(leaf), size = 0; - void *attrs = NULL; + at = inum - ibase(leaf); + size = 0; + attrs = NULL; trace("lookup inode 0x%Lx, %Lx + %Lx", inum, ibase(leaf), at); if (at < icount(leaf)) { @@ -159,9 +170,10 @@ void *ileaf_lookup(struct btree *btree, inum_t inum, struct ileaf *leaf, unsigne static int isinorder(struct btree *btree, struct ileaf *leaf) { + int i, offset, limit; __be16 *dict = ileaf_dict(btree, leaf); - for (int i = 0, offset = 0, limit; i < icount(leaf); i++, offset = limit) + for (i = 0, offset = 0, limit; i < icount(leaf); i++, offset = limit) if ((limit = __atdict(dict, i + 1)) < offset) return 0; return 1; @@ -202,10 +214,22 @@ static void ileaf_trim(struct btree *btree, struct ileaf *leaf) static tuxkey_t ileaf_split(struct btree *btree, tuxkey_t hint, void *from, void *into) { + struct ileaf *leaf, *dest; + __be16 *dict, *destdict; + unsigned at; + unsigned split, free; + int i; +#ifdef SPLIT_AT_INUM + inum_t round; +#else + unsigned hi; +#endif + assert(ileaf_sniff(btree, from)); - struct ileaf *leaf = from, *dest = into; - __be16 *dict = ileaf_dict(btree, from); - __be16 *destdict = ileaf_dict(btree, into); + leaf = from; + dest = into; + dict = ileaf_dict(btree, from); + destdict = ileaf_dict(btree, into); #ifdef SPLIT_AT_INUM /* @@ -216,10 +240,11 @@ static tuxkey_t ileaf_split(struct btree *btree, tuxkey_t hint, hint++; trace("split at inum 0x%Lx", hint); - unsigned at = min_t(tuxkey_t, hint - ibase(leaf), icount(leaf)); + at = min_t(tuxkey_t, hint - ibase(leaf), icount(leaf)); #else /* binsearch inum starting nearest middle of block */ - 
unsigned at = 1, hi = icount(leaf); + at = 1; + hi = icount(leaf); while (at < hi) { int mid = (at + hi) / 2; if (*(dict - mid) < (btree->sb->blocksize / 2)) @@ -229,18 +254,19 @@ static tuxkey_t ileaf_split(struct btree *btree, tuxkey_t hint, } #endif /* should trim leading empty inodes on copy */ - unsigned split = atdict(dict, at), free = atdict(dict, icount(leaf)); + split = atdict(dict, at); + free = atdict(dict, icount(leaf)); trace("split at %x of %x", at, icount(leaf)); trace("copy out %x bytes at %x", free - split, split); assert(free >= split); memcpy(dest->table, leaf->table + split, free - split); dest->count = cpu_to_be16(icount(leaf) - at); veccopy(destdict - icount(dest), dict - icount(leaf), icount(dest)); - for (int i = 1; i <= icount(dest); i++) + for (i = 1; i <= icount(dest); i++) add_idict(destdict - i, -split); #ifdef SPLIT_AT_INUM /* round down to multiple of 64 above ibase */ - inum_t round = hint & ~(inum_t)(btree->entries_per_leaf - 1); + round = hint & ~(inum_t)(btree->entries_per_leaf - 1); dest->ibase = cpu_to_be64(round > ibase(leaf) + icount(leaf) ? round : hint); #else dest->ibase = cpu_to_be64(ibase(leaf) + at); @@ -253,6 +279,14 @@ static tuxkey_t ileaf_split(struct btree *btree, tuxkey_t hint, static int ileaf_merge(struct btree *btree, void *vinto, void *vfrom) { + tuxkey_t fromibase; + unsigned count; + int hole; + __be16 *dict, *fromdict; + int need_size; + unsigned limit; + __be16 __limit; + unsigned fromlimit; struct ileaf *into = vinto, *from = vfrom; unsigned fromcount = icount(from); @@ -261,27 +295,27 @@ static int ileaf_merge(struct btree *btree, void *vinto, void *vfrom) return 1; assert(ibase(from) > ibase(into)); - tuxkey_t fromibase = ibase(from); - unsigned count = icount(into); - int hole = fromibase - ibase(into) + count; + fromibase = ibase(from); + count = icount(into); + hole = fromibase - ibase(into) + count; - __be16 *dict = ileaf_dict(btree, into); - __be16 *fromdict = ileaf_dict(btree, from); - int need_size = hole * sizeof(*dict) + ileaf_need(btree, from); + dict = ileaf_dict(btree, into); + fromdict = ileaf_dict(btree, from); + need_size = hole * sizeof(*dict) + ileaf_need(btree, from); if (ileaf_free(btree, into) < need_size) return 0; /* Fill hole of dict until from_ibase */ - unsigned limit = atdict(dict, count); - __be16 __limit = cpu_to_be16(limit); + limit = atdict(dict, count); + __limit = cpu_to_be16(limit); while (hole--) { count++; *(dict - count) = __limit; } /* Copy data from "from" */ - unsigned fromlimit = atdict(fromdict, fromcount); + fromlimit = atdict(fromdict, fromcount); memcpy(into->table + limit, from->table, fromlimit); /* Adjust copying fromdict */ @@ -343,6 +377,7 @@ static int ileaf_chop(struct btree *btree, tuxkey_t start, u64 len, void *leaf) static void *ileaf_resize(struct btree *btree, tuxkey_t inum, void *vleaf, int newsize) { + void *attrs; struct ileaf *ileaf = vleaf; __be16 *dict = ileaf_dict(btree, ileaf); unsigned count = icount(ileaf); @@ -383,15 +418,17 @@ overflow: ileaf->count = cpu_to_be16(count); } - void *attrs = ileaf->table + offset; + attrs = ileaf->table + offset; if (newsize != size) { + int diff; /* Expand/Shrink attr space */ unsigned limit = __atdict(dict, count); + assert(limit >= offset + size); memmove(attrs + newsize, attrs + size, limit - offset - size); /* Adjust dict */ - int diff = newsize - size; + diff = newsize - size; at++; while (at <= count) { add_idict(dict - at, diff); diff --git a/user/kernel/inode.c b/user/kernel/inode.c index b31fea5..d73e086 100644 --- 
a/user/kernel/inode.c +++ b/user/kernel/inode.c @@ -414,6 +414,7 @@ static int check_present(struct inode *inode) static int open_inode(struct inode *inode) { + struct ileaf_req rq; struct sb *sb = tux_sb(inode->i_sb); struct btree *itree = itree_btree(sb); int err; @@ -427,13 +428,9 @@ static int open_inode(struct inode *inode) goto out; /* Read inode attribute from inode btree */ - struct ileaf_req rq = { - .key = { - .start = tux_inode(inode)->inum, - .len = 1, - }, - .data = inode, - }; + rq.key.start = tux_inode(inode)->inum; + rq.key.len = 1; + rq.data = inode; err = btree_read(cursor, &rq.key); if (!err) { check_present(inode); @@ -481,6 +478,9 @@ struct inode *tux3_ilookup(struct sb *sb, inum_t inum) static int save_inode(struct inode *inode, struct tux3_iattr_data *idata, unsigned delta) { + struct cursor *cursor; + struct iattr_req_data iattr_data; + struct ileaf_req rq; struct sb *sb = tux_sb(inode->i_sb); struct btree *itree = itree_btree(sb); inum_t inum = tux_inode(inode)->inum; @@ -499,7 +499,7 @@ static int save_inode(struct inode *inode, struct tux3_iattr_data *idata, return err; #endif - struct cursor *cursor = alloc_cursor(itree, 1); /* +1 for new depth */ + cursor = alloc_cursor(itree, 1); /* +1 for new depth */ if (!cursor) return -ENOMEM; @@ -513,18 +513,12 @@ static int save_inode(struct inode *inode, struct tux3_iattr_data *idata, } /* Write inode attributes to inode btree */ - struct iattr_req_data iattr_data = { - .idata = idata, - .btree = &tux_inode(inode)->btree, - .inode = inode, - }; - struct ileaf_req rq = { - .key = { - .start = inum, - .len = 1, - }, - .data = &iattr_data, - }; + iattr_data.idata = idata; + iattr_data.btree = &tux_inode(inode)->btree; + iattr_data.inode = inode; + rq.key.start = inum; + rq.key.len = 1; + rq.data = &iattr_data; err = btree_write(cursor, &rq.key); if (err) goto error_release; diff --git a/user/kernel/log.c b/user/kernel/log.c index b96fa00..b5b763e 100644 --- a/user/kernel/log.c +++ b/user/kernel/log.c @@ -228,10 +228,12 @@ int tux3_logmap_io(int rw, struct bufvec *bufvec) static void log_intent(struct sb *sb, u8 intent) { + unsigned char *data; + /* Check whether array is uptodate */ BUILD_BUG_ON(ARRAY_SIZE(log_size) != LOG_TYPES); - unsigned char *data = log_begin(sb, 1); + data = log_begin(sb, 1); *data++ = intent; log_end(sb, data); } @@ -526,9 +528,11 @@ int unstash(struct sb *sb, struct stash *stash, unstash_t actor) if (flink_empty(head)) return 0; while (1) { + u64 *vec, *top; + page = __flink_next_entry(head, struct page, private); - u64 *vec = page_address(page); - u64 *top = page_address(page) + PAGE_SIZE; + vec = page_address(page); + top = page_address(page) + PAGE_SIZE; if (top == stash->top) top = stash->pos; @@ -551,16 +555,15 @@ int unstash(struct sb *sb, struct stash *stash, unstash_t actor) */ int stash_walk(struct sb *sb, struct stash *stash, unstash_t actor) { + struct link *link, *first; struct flink_head *head = &stash->head; - struct page *page; if (flink_empty(head)) return 0; - struct link *link, *first; link = first = flink_next(head); do { - page = __link_entry(link, struct page, private); + struct page *page = __link_entry(link, struct page, private); u64 *vec = page_address(page); u64 *top = page_address(page) + PAGE_SIZE; diff --git a/user/kernel/namei.c b/user/kernel/namei.c index 6da96ae..b1bc6fd 100644 --- a/user/kernel/namei.c +++ b/user/kernel/namei.c @@ -187,11 +187,12 @@ static int tux3_symlink(struct inode *dir, struct dentry *dentry, static int tux3_unlink(struct inode *dir, struct
dentry *dentry) { + int err; struct inode *inode = dentry->d_inode; struct sb *sb = tux_sb(inode->i_sb); change_begin(sb); - int err = tux_del_dirent(dir, dentry); + err = tux_del_dirent(dir, dentry); if (!err) { tux3_iattrdirty(inode); inode->i_ctime = dir->i_ctime; diff --git a/user/kernel/orphan.c b/user/kernel/orphan.c index 916bb4e..6e89b35 100644 --- a/user/kernel/orphan.c +++ b/user/kernel/orphan.c @@ -112,6 +112,7 @@ int tux3_unify_orphan_add(struct sb *sb, struct list_head *orphan_add) down_write(&cursor->btree->lock); while (!list_empty(orphan_add)) { + struct ileaf_req rq; struct tux3_inode *tuxnode = orphan_list_entry(orphan_add->next); trace("inum %Lu", tuxnode->inum); @@ -122,12 +123,8 @@ int tux3_unify_orphan_add(struct sb *sb, struct list_head *orphan_add) goto out; /* Write orphan inum into orphan btree */ - struct ileaf_req rq = { - .key = { - .start = tuxnode->inum, - .len = 1, - }, - }; + rq.key.start = tuxnode->inum; + rq.key.len = 1; err = btree_write(cursor, &rq.key); release_cursor(cursor); if (err) @@ -360,6 +357,7 @@ static int load_enum_inode(struct btree *btree, inum_t inum, void *attrs, /* Load orphan inode from sb->otree */ static int load_otree_orphan_inode(struct replay *rp) { + struct cursor *cursor; struct sb *sb = rp->sb; struct btree *otree = otree_btree(sb); struct ileaf_enumrate_cb cb = { @@ -371,7 +369,7 @@ static int load_otree_orphan_inode(struct replay *rp) if (!has_root(&sb->otree)) return 0; - struct cursor *cursor = alloc_cursor(otree, 0); + cursor = alloc_cursor(otree, 0); if (!cursor) return -ENOMEM; diff --git a/user/kernel/utility.c b/user/kernel/utility.c index 1c76fb5..555225b 100644 --- a/user/kernel/utility.c +++ b/user/kernel/utility.c @@ -6,8 +6,10 @@ int vecio(int rw, struct block_device *dev, loff_t offset, unsigned vecs, struct bio_vec *vec, bio_end_io_t endio, void *info) { + struct bio *bio; + BUG_ON(vecs > bio_get_nr_vecs(dev)); - struct bio *bio = bio_alloc(GFP_NOIO, vecs); + bio = bio_alloc(GFP_NOIO, vecs); if (!bio) return -ENOMEM; bio->bi_bdev = dev; diff --git a/user/kernel/xattr.c b/user/kernel/xattr.c index 4b0d841..46b823a 100644 --- a/user/kernel/xattr.c +++ b/user/kernel/xattr.c @@ -90,6 +90,8 @@ static struct buffer_head *blockread_unatom(struct inode *atable, atom_t atom, static loff_t unatom_dict_read(struct inode *atable, atom_t atom) { + loff_t where; + __be64 *unatom_dict; struct buffer_head *buffer; unsigned offset; @@ -97,8 +99,8 @@ static loff_t unatom_dict_read(struct inode *atable, atom_t atom) if (!buffer) return -EIO; - __be64 *unatom_dict = bufdata(buffer); - loff_t where = be64_to_cpu(unatom_dict[offset]); + unatom_dict = bufdata(buffer); + where = be64_to_cpu(unatom_dict[offset]); blockput(buffer); return where; @@ -106,6 +108,7 @@ static loff_t unatom_dict_read(struct inode *atable, atom_t atom) static loff_t unatom_dict_write(struct inode *atable, atom_t atom, loff_t where) { + __be64 *unatom_dict; unsigned delta = tux3_get_current_delta(); struct buffer_head *buffer, *clone; loff_t old; @@ -127,7 +130,7 @@ static loff_t unatom_dict_write(struct inode *atable, atom_t atom, loff_t where) return PTR_ERR(clone); } - __be64 *unatom_dict = bufdata(clone); + unatom_dict = bufdata(clone); old = be64_to_cpu(unatom_dict[offset]); unatom_dict[offset] = cpu_to_be64(where); mark_buffer_dirty_non(clone); @@ -144,11 +147,13 @@ static int is_free_unatom(loff_t where) /* Convert atom to name */ static int unatom(struct inode *atable, atom_t atom, char *name, unsigned size) { + tux_dirent *entry; + unsigned len; struct
sb *sb = tux_sb(atable->i_sb); struct buffer_head *buffer; int err; + loff_t where; - loff_t where = unatom_dict_read(atable, atom); + where = unatom_dict_read(atable, atom); if (where < 0) { err = where; goto error; } @@ -159,13 +164,13 @@ static int unatom(struct inode *atable, atom_t atom, char *name, unsigned size) err = -EIO; goto error; } - tux_dirent *entry = bufdata(buffer) + (where & sb->blockmask); + entry = bufdata(buffer) + (where & sb->blockmask); if (entry_atom(entry) != atom) { tux3_fs_error(sb, "atom %x reverse entry broken", atom); err = -EIO; goto error_blockput; } - unsigned len = entry->name_len; + len = entry->name_len; if (size) { if (len > size) { err = -ERANGE; @@ -186,6 +191,7 @@ error: /* Find free atom */ static int get_freeatom(struct inode *atable, atom_t *atom) { + loff_t next; struct sb *sb = tux_sb(atable->i_sb); atom_t freeatom = sb->freeatom; @@ -194,7 +200,7 @@ static int get_freeatom(struct inode *atable, atom_t *atom) return 0; } - loff_t next = unatom_dict_read(atable, freeatom); + next = unatom_dict_read(atable, freeatom); if (next < 0) return next; if (!is_free_unatom(next)) { @@ -298,6 +304,7 @@ static int update_refcount(struct sb *sb, struct buffer_head *buffer, /* Modify atom refcount */ static int atomref(struct inode *atable, atom_t atom, int use) { + int low; struct sb *sb = tux_sb(atable->i_sb); unsigned shift = sb->blockbits - ATOMREF_BLKBITS; unsigned block = sb->atomref_base + ATOMREF_SIZE * (atom >> shift); @@ -311,7 +318,7 @@ static int atomref(struct inode *atable, atom_t atom, int use) return -EIO; refcount = bufdata(buffer); - int low = be16_to_cpu(refcount[offset]) + use; + low = be16_to_cpu(refcount[offset]) + use; trace("inc atom %x by %d, offset %x[%x], low = %d", atom, use, block, offset, low); @@ -321,12 +328,14 @@ static int atomref(struct inode *atable, atom_t atom, int use) return err; if (!low || (low & (-1 << 16))) { + int high; + buffer = blockread(mapping(atable), block + 1); if (!buffer) return -EIO; refcount = bufdata(buffer); - int high = be16_to_cpu(refcount[offset]); + high = be16_to_cpu(refcount[offset]); if (!low) blockput(buffer); else { @@ -348,9 +357,12 @@ static int atomref(struct inode *atable, atom_t atom, int use) } if (kill) { + loff_t next, where; + tux_dirent *entry; + trace("delete atom %x", atom); - loff_t next = UNATOM_FREE_MAGIC | sb->freeatom; - loff_t where = unatom_dict_write(atable, atom, next); + next = UNATOM_FREE_MAGIC | sb->freeatom; + where = unatom_dict_write(atable, atom, next); if (where < 0) { /* FIXME: better set a flag that unatom broke * or something! */ @@ -365,7 +377,7 @@ static int atomref(struct inode *atable, atom_t atom, int use) return -EIO; } - tux_dirent *entry = bufdata(buffer) + (where & sb->blockmask); + entry = bufdata(buffer) + (where & sb->blockmask); if (entry_atom(entry) == atom) { /* FIXME: better set a flag that unatom broke * or something!
*/ @@ -388,27 +400,35 @@ static int atomref(struct inode *atable, atom_t atom, int use) /* userland only */ void dump_atoms(struct inode *atable) { + unsigned j; struct sb *sb = tux_sb(atable->i_sb); unsigned blocks = (sb->atomgen + (sb->blockmask >> ATOMREF_BLKBITS)) >> (sb->blockbits - ATOMREF_BLKBITS); - for (unsigned j = 0; j < blocks; j++) { + for (j = 0; j < blocks; j++) { + __be16 *lorefs, *hirefs; + unsigned i; unsigned block = sb->atomref_base + ATOMREF_SIZE * j; struct buffer_head *lobuf, *hibuf; + if (!(lobuf = blockread(mapping(atable), block))) goto eek; if (!(hibuf = blockread(mapping(atable), block + 1))) { blockput(lobuf); goto eek; } - __be16 *lorefs = bufdata(lobuf), *hirefs = bufdata(hibuf); - for (unsigned i = 0; i < (sb->blocksize >> ATOMREF_BLKBITS); i++) { + lorefs = bufdata(lobuf); + hirefs = bufdata(hibuf); + for (i = 0; i < (sb->blocksize >> ATOMREF_BLKBITS); i++) { + atom_t atom; + char name[100]; + int len; unsigned refs = (be16_to_cpu(hirefs[i]) << 16) | be16_to_cpu(lorefs[i]); + if (!refs) continue; - atom_t atom = i; - char name[100]; - int len = unatom(atable, atom, name, sizeof(name)); + atom = i; + len = unatom(atable, atom, name, sizeof(name)); if (len < 0) goto eek; __tux3_dbg("%.*s: atom 0x%08x, ref %u\n", @@ -430,8 +450,10 @@ void show_freeatoms(struct sb *sb) atom_t atom = sb->freeatom; while (atom) { + loff_t next; + tux3_dbg("free atom: %x", atom); - loff_t next = unatom_dict_read(atable, atom); + next = unatom_dict_read(atable, atom); if (next < 0) goto eek; if (!is_free_unatom(next)) @@ -532,13 +554,15 @@ static inline struct xcache_entry *xcache_limit(struct xcache *xcache) int xcache_dump(struct inode *inode) { + struct xcache_entry *xattr; + struct xcache_entry *xlimit; struct xcache *xcache = tux_inode(inode)->xcache; if (!xcache) return 0; - struct xcache_entry *xattr = xcache->xattrs; - struct xcache_entry *xlimit = xcache_limit(xcache); + xattr = xcache->xattrs; + xlimit = xcache_limit(xcache); //__tux3_dbg("xattrs %p/%i", inode->xcache, inode->xcache->size); while (xattr < xlimit) { @@ -605,6 +629,7 @@ static inline int remove_old(struct xcache *xcache, struct xcache_entry *xattr) static int xcache_update(struct inode *inode, unsigned atom, const void *data, unsigned len, unsigned flags) { + unsigned more; struct xcache *xcache = tux_inode(inode)->xcache; struct xcache_entry *xattr = xcache_lookup(xcache, atom); int use = 0; @@ -624,7 +649,7 @@ static int xcache_update(struct inode *inode, unsigned atom, const void *data, } /* Insert new */ - unsigned more = sizeof(*xattr) + len; + more = sizeof(*xattr) + len; if (!xcache || xcache->size + more > xcache->maxsize) { unsigned oldsize = xcache ? 
xcache->size : 0; int err = expand_xcache(inode, oldsize + more); @@ -678,6 +703,8 @@ int xcache_remove_all(struct inode *inode) int get_xattr(struct inode *inode, const char *name, unsigned len, void *data, unsigned size) { + struct xcache *xcache; + struct xcache_entry *xattr; struct inode *atable = tux_sb(inode->i_sb)->atable; atom_t atom; int ret; @@ -687,8 +714,8 @@ int get_xattr(struct inode *inode, const char *name, unsigned len, void *data, if (ret) goto out; - struct xcache *xcache = tux_inode(inode)->xcache; - struct xcache_entry *xattr = xcache_lookup(xcache, atom); + xcache = tux_inode(inode)->xcache; + xattr = xcache_lookup(xcache, atom); if (IS_ERR(xattr)) { ret = PTR_ERR(xattr); goto out; @@ -706,14 +733,15 @@ out: int set_xattr(struct inode *inode, const char *name, unsigned len, const void *data, unsigned size, unsigned flags) { + int err; + atom_t atom; struct sb *sb = tux_sb(inode->i_sb); struct inode *atable = sb->atable; mutex_lock(&atable->i_mutex); change_begin(sb); - atom_t atom; - int err = make_atom(atable, name, len, &atom); + err = make_atom(atable, name, len, &atom); if (!err) { err = xcache_update(inode, atom, data, size, flags); if (err) { @@ -729,6 +757,7 @@ int set_xattr(struct inode *inode, const char *name, unsigned len, int del_xattr(struct inode *inode, const char *name, unsigned len) { + atom_t atom; struct sb *sb = tux_sb(inode->i_sb); struct inode *atable = sb->atable; int err; @@ -736,18 +765,19 @@ int del_xattr(struct inode *inode, const char *name, unsigned len) mutex_lock(&atable->i_mutex); change_begin(sb); - atom_t atom; err = find_atom(atable, name, len, &atom); if (!err) { + int used; struct xcache *xcache = tux_inode(inode)->xcache; struct xcache_entry *xattr = xcache_lookup(xcache, atom); + if (IS_ERR(xattr)) { err = PTR_ERR(xattr); goto out; } tux3_xattrdirty(inode); - int used = remove_old(xcache, xattr); + used = remove_old(xcache, xattr); if (used) { tux3_mark_inode_dirty(inode); /* FIXME: error check */ @@ -763,19 +793,24 @@ out: int list_xattr(struct inode *inode, char *text, size_t size) { + struct xcache *xcache; + struct xcache_entry *xattr; + struct xcache_entry *xlimit; + char *base, *top; + int err; struct sb *sb = tux_sb(inode->i_sb); struct inode *atable = sb->atable; mutex_lock(&atable->i_mutex); - struct xcache *xcache = tux_inode(inode)->xcache; + xcache = tux_inode(inode)->xcache; if (!xcache) return 0; - struct xcache_entry *xattr = xcache->xattrs; - struct xcache_entry *xlimit = xcache_limit(xcache); - char *base = text, *top = text + size; - int err; + xattr = xcache->xattrs; + xlimit = xcache_limit(xcache); + base = text; + top = text + size; while (xattr < xlimit) { atom_t atom = xattr->atom; @@ -823,14 +858,18 @@ error: unsigned encode_xsize(struct inode *inode) { + unsigned size, xatsize; + struct xcache_entry *xattr; + struct xcache_entry *xlimit; struct xcache *xcache = tux_inode(inode)->xcache; if (!xcache) return 0; - unsigned size = 0, xatsize = atsize[XATTR_ATTR]; - struct xcache_entry *xattr = xcache->xattrs; - struct xcache_entry *xlimit = xcache_limit(xcache); + size = 0; + xatsize = atsize[XATTR_ATTR]; + xattr = xcache->xattrs; + xlimit = xcache_limit(xcache); while (xattr < xlimit) { size += 2 + xatsize + xattr->size; @@ -842,14 +881,17 @@ unsigned encode_xsize(struct inode *inode) void *encode_xattrs(struct inode *inode, void *attrs, unsigned size) { + struct xcache_entry *xattr; + struct xcache_entry *xlimit; + void *limit; struct xcache *xcache = tux_inode(inode)->xcache; if (!xcache) return attrs; - 
struct xcache_entry *xattr = xcache->xattrs; - struct xcache_entry *xlimit = xcache_limit(xcache); - void *limit = attrs + size - 3; + xattr = xcache->xattrs; + xlimit = xcache_limit(xcache); + limit = attrs + size - 3; while (xattr < xlimit) { if (attrs >= limit) diff --git a/user/options.c b/user/options.c index b97fa8e..96e3d73 100644 --- a/user/options.c +++ b/user/options.c @@ -161,8 +161,10 @@ int optspace(struct options *options, int argc, const char *argv[]) int optcount(void *work, int opt) { + int i; int count = 0; - for (int i = 0; i < ((struct optv *)work)->optc; i++) + + for (i = 0; i < ((struct optv *)work)->optc; i++) count += optindex(work, i) == opt; return count; } diff --git a/user/super.c b/user/super.c index 2ab462e..9795a24 100644 --- a/user/super.c +++ b/user/super.c @@ -66,6 +66,7 @@ int put_super(struct sb *sb) /* Clear first and last block to get rid of other magic */ static int clear_other_magic(struct sb *sb) { + int i; struct { loff_t loc; unsigned len; @@ -77,7 +78,7 @@ static int clear_other_magic(struct sb *sb) unsigned maxlen = 0; int err; - for (int i = 0; i < ARRAY_SIZE(area); i++) + for (i = 0; i < ARRAY_SIZE(area); i++) maxlen = max(maxlen, area[i].len); data = malloc(maxlen); @@ -85,7 +86,7 @@ static int clear_other_magic(struct sb *sb) return -ENOMEM; memset(data, 0, maxlen); - for (int i = 0; i < ARRAY_SIZE(area); i++) { + for (i = 0; i < ARRAY_SIZE(area); i++) { err = devio(WRITE, sb->dev, area[i].loc, data, area[i].len); if (err) break; diff --git a/user/tux3.c b/user/tux3.c index 9c1d0f2..cba8d52 100644 --- a/user/tux3.c +++ b/user/tux3.c @@ -110,8 +110,10 @@ static void command_options(int *argc, const char ***args, struct options *options, int need, const char *progname, const char *cmdname, const char *blurb, struct vars *vars) { + int i; unsigned space = optspace(options, *argc, *args); void *optv = malloc(space); + if (!optv) strerror_exit(1, errno, "malloc"); @@ -119,7 +121,7 @@ static void command_options(int *argc, const char ***args, if (optc < 0) error_exit("%s!", opterror(optv)); - for (int i = 0; i < optc; i++) { + for (i = 0; i < optc; i++) { const char *value = optvalue(optv, i); switch (options[optindex(optv, i)].terse[0]) { case 'b': @@ -151,6 +153,7 @@ static void command_options(int *argc, const char ***args, int main(int argc, char *argv[]) { + int i; const char *progname = optbasename(argv[0]); const char **args = (const char **)argv; const char *blurb = " "; @@ -195,10 +198,12 @@ int main(int argc, char *argv[]) int verbose = 0; - for (int i = 0; i < optc; i++) { + for (i = 0; i < optc; i++) { + int j; + switch (options[optindex(optv, i)].terse[0]) { case 'L': - for (int j = 0; j < ARRAY_SIZE(commands); j++) + for (j = 0; j < ARRAY_SIZE(commands); j++) printf("%s ", commands[j]); printf("\n"); exit(0); diff --git a/user/tux3_dump.c b/user/tux3_dump.c index 6a054d8..51bc3e9 100644 --- a/user/tux3_dump.c +++ b/user/tux3_dump.c @@ -183,9 +183,10 @@ static void stats_data_add(struct stats_btree *stats, block_t block, static struct stats_btree_level stats_levels_sum(struct stats_btree *stats, int depth) { + int i; struct stats_btree_level sum = {}; - for (int i = 0; i <= depth; i++) { + for (i = 0; i <= depth; i++) { sum.block.blocks += stats->levels[i].block.blocks; sum.block.empty += stats->levels[i].block.empty; sum.block.bytes += stats->levels[i].block.bytes; @@ -199,6 +200,7 @@ stats_levels_sum(struct stats_btree *stats, int depth) static void stats_btree_merge(struct stats_btree **a, struct stats_btree *b) { + int i; /* No 
need to merge */ if (b == NULL) return; @@ -214,7 +216,7 @@ static void stats_btree_merge(struct stats_btree **a, struct stats_btree *b) /* Copy (*a) to tmp */ tmp->depth_seek = (*a)->depth_seek; tmp->data = (*a)->data; - for (int i = 0; i < (*a)->depth; i++) + for (i = 0; i < (*a)->depth; i++) tmp->levels[i] = (*a)->levels[i]; /* Set leaf info to new depth */ tmp->levels[tmp->depth] = (*a)->levels[(*a)->depth]; @@ -225,7 +227,7 @@ static void stats_btree_merge(struct stats_btree **a, struct stats_btree *b) } /* Merge bnode => bnode, and leaf => leaf */ - for (int i = 0; i <= b->depth; i++) { + for (i = 0; i <= b->depth; i++) { int level = i < b->depth ? i : (*a)->depth; struct stats_btree_level *la = &(*a)->levels[level]; struct stats_btree_level *lb = &b->levels[i]; @@ -274,7 +276,9 @@ static void stats_print_depth_seek(struct stats_seek *stats) static void stats_print_seeks(struct sb *sb, struct stats_btree *stats, int data, int dir) { - for (int i = 0; i <= stats->depth; i++) { + int i; + + for (i = 0; i <= stats->depth; i++) { char prefix[64]; if (i < stats->depth) diff --git a/user/tux3_graph.c b/user/tux3_graph.c index 5adfd7c..ace9e3b 100644 --- a/user/tux3_graph.c +++ b/user/tux3_graph.c @@ -239,11 +239,12 @@ static void draw_bitmap_data(struct graph_info *gi, struct btree *btree, struct buffer_head *leafbuf, block_t index, block_t block, unsigned count) { + unsigned i; struct sb *sb = btree->sb; struct inode *bitmap = sb->bitmap; void *data; - for (unsigned i = 0; i < count; i++) { + for (i = 0; i < count; i++) { unsigned idx, size = sb->blocksize * 8; struct buffer_head *buffer; @@ -309,10 +310,11 @@ static void draw_countmap_data(struct graph_info *gi, struct btree *btree, struct buffer_head *leafbuf, block_t index, block_t block, unsigned count) { + unsigned i; struct sb *sb = btree->sb; struct inode *countmap = sb->countmap; - for (unsigned i = 0; i < count; i++) { + for (i = 0; i < count; i++) { unsigned size = sb->blocksize >> 1; block_t group = (index + i) << (sb->blockbits - 1); struct buffer_head *buffer; @@ -460,11 +462,13 @@ static void draw_atable_data(struct graph_info *gi, struct btree *btree, { static int start_atomref = 1, start_unatom = 1; + unsigned i; struct sb *sb = btree->sb; - struct buffer_head *buffer, *hi_buf; - for (unsigned i = 0; i < count; i++) { - buffer = blockread(mapping(sb->atable), index + i); + for (i = 0; i < count; i++) { + struct buffer_head *hi_buf; + struct buffer_head *buffer = blockread(mapping(sb->atable), index + i); + assert(buffer); if (index < sb->atomref_base) { @@ -531,10 +535,11 @@ static void draw_dir_data(struct graph_info *gi, struct btree *btree, struct buffer_head *leafbuf, block_t index, block_t block, unsigned count) { - struct buffer_head *buffer; + unsigned i; + + for (i = 0; i < count; i++) { + struct buffer_head *buffer = blockread(mapping(btree_inode(btree)), index + i); - for (unsigned i = 0; i < count; i++) { - buffer = blockread(mapping(btree_inode(btree)), index + i); assert(buffer); __draw_dir_data(gi, btree, leafbuf, buffer, block + i, 0); @@ -635,8 +640,10 @@ static inline struct group *dleaf1_group_ptr(struct group *groups, int gr) static inline struct entry *dleaf1_entries(struct dleaf *dleaf, struct group *groups, int gr) { + int i; struct entry *entries = (struct entry *)(groups - dleaf_groups(dleaf)); - for (int i = 0; i < gr; i++) + + for (i = 0; i < gr; i++) entries -= group_count(dleaf1_group_ptr(groups, i)); return entries; } @@ -692,6 +699,7 @@ static void draw_dleaf1(struct graph_info *gi, struct 
btree *btree, const char *dleaf_name = get_dleaf_name(leafbuf); struct diskextent *extents; int gr; + int ent; draw_dleaf_start(gi, leafbuf); @@ -704,7 +712,8 @@ static void draw_dleaf1(struct graph_info *gi, struct btree *btree, for (gr = 0; gr < dleaf_groups(dleaf); gr++) { struct group *group = dleaf1_group_ptr(groups, gr); struct entry *entries = dleaf1_entries(dleaf, groups, gr); - for (int ent = 0; ent < group_count(group); ent++) { + + for (ent = 0; ent < group_count(group); ent++) { int ex, ex_count = dleaf1_extent_count(entries, ent); extents = dleaf1_extents(dleaf, groups, gr, ent); for (ex = 0; ex < ex_count; ex++) { @@ -759,7 +768,7 @@ static void draw_dleaf1(struct graph_info *gi, struct btree *btree, "%s:gr%u:w -> %s:gr%uent%u:w;\n", dleaf_name, gr, dleaf_name, gr, 0); - for (int ent = 0; ent < group_count(group); ent++) { + for (ent = 0; ent < group_count(group); ent++) { /* write link: dleaf:entry -> dleaf:extent */ fprintf(gi->fp, "%s:gr%uent%u:w -> %s:gr%uent%uex%u:w;\n", diff --git a/user/walk.c b/user/walk.c index 384a4c8..ab7adf6 100644 --- a/user/walk.c +++ b/user/walk.c @@ -27,10 +27,11 @@ static void walk_extent(struct btree *btree, struct buffer_head *leafbuf, walk_data_cb walk_data, void *callback, void *data) { - struct buffer_head *buffer; + unsigned i; + + for (i = 0; i < count; i++) { + struct buffer_head *buffer = blockread(mapping(btree_inode(btree)), index + i); - for (unsigned i = 0; i < count; i++) { - buffer = blockread(mapping(btree_inode(btree)), index + i); assert(buffer); walk_data(btree, leafbuf, buffer, block + i, callback, data);
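
Every hunk above applies the same mechanical rewrite: declarations that C99 allows to follow statements, including for-loop induction variables such as "for (int i = 0; ...)", are hoisted to the top of the enclosing block, and the old initializer becomes a plain assignment at the spot where the declaration used to sit. The sketch below is illustrative only and is not part of the patch; sum_bytes_c99()/sum_bytes_c89() are hypothetical helpers, not functions from this tree.

/* C99 style, as the code read before the conversion. */
static unsigned sum_bytes_c99(const unsigned char *buf, unsigned len)
{
	unsigned total = 0;

	/* Loop variable declared in the for-init needs gnu99/C99. */
	for (unsigned i = 0; i < len; i++)
		total += buf[i];
	return total;
}

/* C89 style, the shape the patch converts to: declarations first. */
static unsigned sum_bytes_c89(const unsigned char *buf, unsigned len)
{
	unsigned i;
	unsigned total = 0;

	/* Same loop; the counter is now declared at the top of the block. */
	for (i = 0; i < len; i++)
		total += buf[i];
	return total;
}

The same shape recurs in every file touched above, whether the hoisted variable is a loop counter, a pointer fetched from bufdata(), or a cursor allocated mid-function.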