/* cregit-Linux: how code gets into the kernel
 * Release 4.10  fs/jffs2/readinode.c
 * Directory: fs/jffs2
 *
 * JFFS2 -- Journalling Flash File System, Version 2.
 * Copyright © 2001-2007 Red Hat, Inc.
 * Created by David Woodhouse <>
 * For licensing information, see the file 'LICENCE' in this directory.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/crc32.h>
#include <linux/pagemap.h>
#include <linux/mtd/mtd.h>
#include <linux/compiler.h>
#include "nodelist.h"

/*
 * Check the data CRC of the node.
 * Returns: 0 if the data CRC is correct;
 *          1 - if incorrect;
 *          error code if an error occurred.
 */
/*
 * Verify the data CRC of a not-yet-checked node and, on success, move its
 * length from the eraseblock's "unchecked" accounting into "used" and mark
 * the ref REF_PRISTINE.
 *
 * Returns 0 if the CRC matches, 1 if it does not, or a negative error code.
 */
static int check_node_data(struct jffs2_sb_info *c, struct jffs2_tmp_dnode_info *tn)
{
	struct jffs2_raw_node_ref *ref = tn->fn->raw;
	int err = 0, pointed = 0;
	struct jffs2_eraseblock *jeb;
	unsigned char *buffer;
	uint32_t crc, ofs, len;
	size_t retlen;

	/* Caller guarantees there is data to check (csize != 0) */
	BUG_ON(tn->csize == 0);

	/* Calculate how many bytes were already checked */
	ofs = ref_offset(ref) + sizeof(struct jffs2_raw_inode);
	len = tn->csize;

	if (jffs2_is_writebuffered(c)) {
		/*
		 * With a write buffer, everything up to the next wbuf page
		 * boundary was already CRC'd via tn->partial_crc when the
		 * node header was read; skip past it.
		 */
		int adj = ofs % c->wbuf_pagesize;
		if (likely(adj))
			adj = c->wbuf_pagesize - adj;

		if (adj >= tn->csize) {
			/* The whole payload was covered by the partial CRC */
			dbg_readinode("no need to check node at %#08x, data length %u, data starts at %#08x - it has already been checked.\n",
				      ref_offset(ref), tn->csize, ofs);
			goto adj_acc;
		}

		ofs += adj;
		len -= adj;
	}

	dbg_readinode("check node at %#08x, data length %u, partial CRC %#08x, correct CRC %#08x, data starts at %#08x, start checking from %#08x - %u bytes.\n",
		ref_offset(ref), tn->csize, tn->partial_crc, tn->data_crc, ofs - len, ofs, len);

#ifndef __ECOS
	/* TODO: instead, incapsulate point() stuff to jffs2_flash_read(),
	 * adding and jffs2_flash_read_end() interface. */
	err = mtd_point(c->mtd, ofs, len, &retlen, (void **)&buffer, NULL);
	if (!err && retlen < len) {
		/* Short mapping is useless here; fall back to reading */
		JFFS2_WARNING("MTD point returned len too short: %zu instead of %u.\n", retlen, tn->csize);
		mtd_unpoint(c->mtd, ofs, retlen);
	} else if (err) {
		if (err != -EOPNOTSUPP)
			JFFS2_WARNING("MTD point failed: error code %d.\n", err);
	} else
		pointed = 1; /* succesfully pointed to device */
#endif

	if (!pointed) {
		/* No direct mapping; read the payload into a temporary buffer */
		buffer = kmalloc(len, GFP_KERNEL);
		if (unlikely(!buffer))
			return -ENOMEM;

		/* TODO: this is very frequent pattern, make it a separate
		 * routine */
		err = jffs2_flash_read(c, ofs, len, &retlen, buffer);
		if (err) {
			JFFS2_ERROR("can not read %d bytes from 0x%08x, error code: %d.\n", len, ofs, err);
			goto free_out;
		}

		if (retlen != len) {
			JFFS2_ERROR("short read at %#08x: %zd instead of %d.\n", ofs, retlen, len);
			err = -EIO;
			goto free_out;
		}
	}

	/* Continue calculating CRC from where the partial CRC left off */
	crc = crc32(tn->partial_crc, buffer, len);
	if(!pointed)
		kfree(buffer);
#ifndef __ECOS
	else
		mtd_unpoint(c->mtd, ofs, len);
#endif

	if (crc != tn->data_crc) {
		JFFS2_NOTICE("wrong data CRC in data node at 0x%08x: read %#08x, calculated %#08x.\n",
			     ref_offset(ref), tn->data_crc, crc);
		return 1;
	}

adj_acc:
	jeb = &c->blocks[ref->flash_offset / c->sector_size];
	len = ref_totlen(c, jeb, ref);
	/* If it should be REF_NORMAL, it'll get marked as such when
	   we build the fragtree, shortly. No need to worry about GC
	   moving it while it's marked REF_PRISTINE -- GC won't happen
	   till we've finished checking every inode anyway. */
	ref->flash_offset |= REF_PRISTINE;
	/*
	 * Mark the node as having been checked and fix the
	 * accounting accordingly.
	 */
	spin_lock(&c->erase_completion_lock);
	jeb->used_size += len;
	jeb->unchecked_size -= len;
	c->used_size += len;
	c->unchecked_size -= len;
	jffs2_dbg_acct_paranoia_check_nolock(c, jeb);
	spin_unlock(&c->erase_completion_lock);

	return 0;

free_out:
	/* Error path: release the buffer or mapping, propagate err */
	if(!pointed)
		kfree(buffer);
#ifndef __ECOS
	else
		mtd_unpoint(c->mtd, ofs, len);
#endif
	return err;
}


/*
 * cregit contributor statistics:
 *   david woodhouse    536  96.06%  3  33.33%
 *   artem bityutskiy    13   2.33%  3  33.33%
 *   jared hulbert        7   1.25%  1  11.11%
 *   alexey korolev       1   0.18%  1  11.11%
 *   andy lowe            1   0.18%  1  11.11%
 */

/* * Helper function for jffs2_add_older_frag_to_fragtree(). * * Checks the node if we are in the checking stage. */
static int check_tn_node(struct jffs2_sb_info *c, struct jffs2_tmp_dnode_info *tn) { int ret; BUG_ON(ref_obsolete(tn->fn->raw)); /* We only check the data CRC of unchecked nodes */ if (ref_flags(tn->fn->raw) != REF_UNCHECKED) return 0; dbg_readinode("check node %#04x-%#04x, phys offs %#08x\n", tn->fn->ofs, tn->fn->ofs + tn->fn->size, ref_offset(tn->fn->raw)); ret = check_node_data(c, tn); if (unlikely(ret < 0)) { JFFS2_ERROR("check_node_data() returned error: %d.\n", ret); } else if (unlikely(ret > 0)) { dbg_readinode("CRC error, mark it obsolete.\n"); jffs2_mark_node_obsolete(c, tn->fn->raw); } return ret; }


/*
 * cregit contributor statistics:
 *   david woodhouse    138  100.00%  1  100.00%
 */

/*
 * Walk the tmp_dnode_info rbtree towards 'offset'.  Because several nodes
 * may start at the same offset, the descent always continues to a leaf;
 * the return value is the last node visited — a node adjacent to where
 * 'offset' would be inserted — or NULL for an empty tree.  The caller is
 * expected to step backwards over overlapped nodes from there.
 *
 * Fix: the original loop had a third arm, "else break", after testing
 * "< offset" and ">= offset".  Those two comparisons are exhaustive, so
 * the break was unreachable dead code; it is removed with no behavior
 * change.
 */
static struct jffs2_tmp_dnode_info *jffs2_lookup_tn(struct rb_root *tn_root,
						    uint32_t offset)
{
	struct rb_node *next;
	struct jffs2_tmp_dnode_info *tn = NULL;

	dbg_readinode("root %p, offset %d\n", tn_root, offset);

	next = tn_root->rb_node;
	while (next) {
		tn = rb_entry(next, struct jffs2_tmp_dnode_info, rb);

		if (tn->fn->ofs < offset)
			next = tn->rb.rb_right;
		else
			next = tn->rb.rb_left;
	}

	return tn;
}


/*
 * cregit contributor statistics:
 *   david woodhouse    103  100.00%  1  100.00%
 */

static void jffs2_kill_tn(struct jffs2_sb_info *c, struct jffs2_tmp_dnode_info *tn) { jffs2_mark_node_obsolete(c, tn->fn->raw); jffs2_free_full_dnode(tn->fn); jffs2_free_tmp_dnode_info(tn); }


/*
 * cregit contributor statistics:
 *   david woodhouse    39  100.00%  1  100.00%
 */

/* * This function is used when we read an inode. Data nodes arrive in * arbitrary order -- they may be older or newer than the nodes which * are already in the tree. Where overlaps occur, the older node can * be discarded as long as the newer passes the CRC check. We don't * bother to keep track of holes in this rbtree, and neither do we deal * with frags -- we can have multiple entries starting at the same * offset, and the one with the smallest length will come first in the * ordering. * * Returns 0 if the node was handled (including marking it obsolete) * < 0 an if error occurred */
static int jffs2_add_tn_to_tree(struct jffs2_sb_info *c,
				struct jffs2_readinode_info *rii,
				struct jffs2_tmp_dnode_info *tn)
{
	uint32_t fn_end = tn->fn->ofs + tn->fn->size;
	struct jffs2_tmp_dnode_info *this, *ptn;

	dbg_readinode("insert fragment %#04x-%#04x, ver %u at %08x\n", tn->fn->ofs, fn_end, tn->version, ref_offset(tn->fn->raw));

	/* If a node has zero dsize, we only have to keep it if it might be the
	   node with highest version -- i.e. the one which will end up as f->metadata.
	   Note that such nodes won't be REF_UNCHECKED since there are no data to
	   check anyway. */
	if (!tn->fn->size) {
		if (rii->mdata_tn) {
			if (rii->mdata_tn->version < tn->version) {
				/* We had a candidate mdata node already */
				dbg_readinode("kill old mdata with ver %d\n", rii->mdata_tn->version);
				jffs2_kill_tn(c, rii->mdata_tn);
			} else {
				/* Incoming mdata candidate is older; drop it */
				dbg_readinode("kill new mdata with ver %d (older than existing %d\n",
					      tn->version, rii->mdata_tn->version);
				jffs2_kill_tn(c, tn);
				return 0;
			}
		}
		rii->mdata_tn = tn;
		dbg_readinode("keep new mdata with ver %d\n", tn->version);
		return 0;
	}

	/* Find the earliest node which _may_ be relevant to this one */
	this = jffs2_lookup_tn(&rii->tn_root, tn->fn->ofs);
	if (this) {
		/* If the node is coincident with another at a lower address,
		   back up until the other node is found. It may be relevant */
		while (this->overlapped) {
			ptn = tn_prev(this);
			if (!ptn) {
				/*
				 * We killed a node which set the overlapped
				 * flags during the scan. Fix it up.
				 */
				this->overlapped = 0;
				break;
			}
			this = ptn;
		}
		dbg_readinode("'this' found %#04x-%#04x (%s)\n", this->fn->ofs, this->fn->ofs + this->fn->size, this->fn ? "data" : "hole");
	}

	/* Scan forward over every node that could overlap the new one */
	while (this) {
		if (this->fn->ofs > fn_end)
			break;
		dbg_readinode("Ponder this ver %d, 0x%x-0x%x\n",
			      this->version, this->fn->ofs, this->fn->size);

		if (this->version == tn->version) {
			/* Version number collision means REF_PRISTINE GC. Accept either
			   of them as long as the CRC is correct. Check the one we have
			   already... */
			if (!check_tn_node(c, this)) {
				/* The one we already had was OK. Keep it and throw away the new one */
				dbg_readinode("Like old node. Throw away new\n");
				jffs2_kill_tn(c, tn);
				return 0;
			} else {
				/* Who cares if the new one is good; keep it for now anyway. */
				dbg_readinode("Like new node. Throw away old\n");
				rb_replace_node(&this->rb, &tn->rb, &rii->tn_root);
				jffs2_kill_tn(c, this);
				/* Same overlapping from in front and behind */
				return 0;
			}
		}
		if (this->version < tn->version &&
		    this->fn->ofs >= tn->fn->ofs &&
		    this->fn->ofs + this->fn->size <= fn_end) {
			/* New node entirely overlaps 'this' */
			if (check_tn_node(c, tn)) {
				dbg_readinode("new node bad CRC\n");
				jffs2_kill_tn(c, tn);
				return 0;
			}
			/* ... and is good. Kill 'this' and any subsequent nodes which are also overlapped */
			while (this && this->fn->ofs + this->fn->size <= fn_end) {
				struct jffs2_tmp_dnode_info *next = tn_next(this);
				if (this->version < tn->version) {
					tn_erase(this, &rii->tn_root);
					dbg_readinode("Kill overlapped ver %d, 0x%x-0x%x\n",
						      this->version, this->fn->ofs,
						      this->fn->ofs+this->fn->size);
					jffs2_kill_tn(c, this);
				}
				this = next;
			}
			dbg_readinode("Done killing overlapped nodes\n");
			continue;
		}
		if (this->version > tn->version &&
		    this->fn->ofs <= tn->fn->ofs &&
		    this->fn->ofs+this->fn->size >= fn_end) {
			/* New node entirely overlapped by 'this' */
			if (!check_tn_node(c, this)) {
				dbg_readinode("Good CRC on old node. Kill new\n");
				jffs2_kill_tn(c, tn);
				return 0;
			}
			/* ... but 'this' was bad. Replace it... */
			dbg_readinode("Bad CRC on old overlapping node. Kill it\n");
			tn_erase(this, &rii->tn_root);
			jffs2_kill_tn(c, this);
			break;
		}

		this = tn_next(this);
	}

	/* We neither completely obsoleted nor were completely
	   obsoleted by an earlier node. Insert into the tree */
	{
		struct rb_node *parent;
		struct rb_node **link = &rii->tn_root.rb_node;
		struct jffs2_tmp_dnode_info *insert_point = NULL;

		/* Ordered by offset; ties broken by size, smallest first */
		while (*link) {
			parent = *link;
			insert_point = rb_entry(parent, struct jffs2_tmp_dnode_info, rb);
			if (tn->fn->ofs > insert_point->fn->ofs)
				link = &insert_point->rb.rb_right;
			else if (tn->fn->ofs < insert_point->fn->ofs ||
				 tn->fn->size < insert_point->fn->size)
				link = &insert_point->rb.rb_left;
			else
				link = &insert_point->rb.rb_right;
		}
		rb_link_node(&tn->rb, &insert_point->rb, link);
		rb_insert_color(&tn->rb, &rii->tn_root);
	}

	/* If there's anything behind that overlaps us, note it */
	this = tn_prev(tn);
	if (this) {
		while (1) {
			if (this->fn->ofs + this->fn->size > tn->fn->ofs) {
				dbg_readinode("Node is overlapped by %p (v %d, 0x%x-0x%x)\n",
					      this, this->version, this->fn->ofs,
					      this->fn->ofs+this->fn->size);
				tn->overlapped = 1;
				break;
			}
			if (!this->overlapped)
				break;

			ptn = tn_prev(this);
			if (!ptn) {
				/*
				 * We killed a node which set the overlapped
				 * flags during the scan. Fix it up.
				 */
				this->overlapped = 0;
				break;
			}
			this = ptn;
		}
	}

	/* If the new node overlaps anything ahead, note it */
	this = tn_next(tn);
	while (this && this->fn->ofs < fn_end) {
		this->overlapped = 1;
		dbg_readinode("Node ver %d, 0x%x-0x%x is overlapped\n",
			      this->version, this->fn->ofs,
			      this->fn->ofs+this->fn->size);
		this = tn_next(this);
	}
	return 0;
}


/*
 * cregit contributor statistics:
 *   david woodhouse     918  95.43%  6  66.67%
 *   thomas gleixner      42   4.37%  1  11.11%
 *   artem bityutskiy      1   0.10%  1  11.11%
 *   geert uytterhoeven    1   0.10%  1  11.11%
 */

/* Trivial function to remove the last node in the tree. Which by definition has no right-hand child — so can be removed just by making its left-hand child (if any) take its place under its parent. Since this is only done when we're consuming the whole tree, there's no need to use rb_erase() and let it worry about adjusting colours and balancing the tree. That would just be a waste of time. */
/*
 * Unlink the rightmost (last) node of the tree without rebalancing.
 * Since 'node' has no right child, splicing its left subtree into its
 * parent slot (carrying over the parent/colour word) is sufficient.
 * Only valid because the caller is consuming the entire tree anyway.
 */
static void eat_last(struct rb_root *root, struct rb_node *node)
{
	struct rb_node *parent = rb_parent(node);
	struct rb_node **slot;

	/* LAST! */
	BUG_ON(node->rb_right);

	if (!parent)
		slot = &root->rb_node;
	else
		slot = (node == parent->rb_left) ? &parent->rb_left
						 : &parent->rb_right;

	*slot = node->rb_left;
	if (node->rb_left)
		node->rb_left->__rb_parent_color = node->__rb_parent_color;
}


/*
 * cregit contributor statistics:
 *   david woodhouse     97  97.98%  1  50.00%
 *   michel lespinasse    2   2.02%  1  50.00%
 */

/* We put the version tree in reverse order, so we can use the same eat_last() function that we use to consume the tmpnode tree (tn_root). */
static void ver_insert(struct rb_root *ver_root, struct jffs2_tmp_dnode_info *tn) { struct rb_node **link = &ver_root->rb_node; struct rb_node *parent = NULL; struct jffs2_tmp_dnode_info *this_tn; while (*link) { parent = *link; this_tn = rb_entry(parent, struct jffs2_tmp_dnode_info, rb); if (tn->version > this_tn->version) link = &parent->rb_left; else link = &parent->rb_right; } dbg_readinode("Link new node at %p (root is %p)\n", link, ver_root); rb_link_node(&tn->rb, parent, link); rb_insert_color(&tn->rb, ver_root); }


/*
 * cregit contributor statistics:
 *   david woodhouse    119  100.00%  1  100.00%
 */

/* Build final, normal fragtree from tn tree. It doesn't matter which order we add nodes to the real fragtree, as long as they don't overlap. And having thrown away the majority of overlapped nodes as we went, there really shouldn't be many sets of nodes which do overlap. If we start at the end, we can use the overlap markers -- we can just eat nodes which aren't overlapped, and when we encounter nodes which _do_ overlap we sort them all into a temporary tree in version order before replaying them. */
static int jffs2_build_inode_fragtree(struct jffs2_sb_info *c,
				      struct jffs2_inode_info *f,
				      struct jffs2_readinode_info *rii)
{
	struct jffs2_tmp_dnode_info *pen, *last, *this;
	struct rb_root ver_root = RB_ROOT;
	uint32_t high_ver = 0;

	/* Seed the highest-valid-version tracking with the mdata candidate */
	if (rii->mdata_tn) {
		dbg_readinode("potential mdata is ver %d at %p\n", rii->mdata_tn->version, rii->mdata_tn);
		high_ver = rii->mdata_tn->version;
		rii->latest_ref = rii->mdata_tn->fn->raw;
	}
#ifdef JFFS2_DBG_READINODE_MESSAGES
	/* Debug build only: dump the whole tn tree in reverse order */
	this = tn_last(&rii->tn_root);
	while (this) {
		dbg_readinode("tn %p ver %d range 0x%x-0x%x ov %d\n", this, this->version, this->fn->ofs,
			      this->fn->ofs+this->fn->size, this->overlapped);
		this = tn_prev(this);
	}
#endif
	/* Consume tn_root from the end, batching overlapped runs into ver_root */
	pen = tn_last(&rii->tn_root);
	while ((last = pen)) {
		pen = tn_prev(last);

		eat_last(&rii->tn_root, &last->rb);
		ver_insert(&ver_root, last);

		if (unlikely(last->overlapped)) {
			/* Keep accumulating the overlapped run... */
			if (pen)
				continue;
			/*
			 * We killed a node which set the overlapped
			 * flags during the scan. Fix it up.
			 */
			last->overlapped = 0;
		}

		/* Now we have a bunch of nodes in reverse version
		   order, in the tree at ver_root. Most of the time,
		   there'll actually be only one node in the 'tree',
		   in fact. */
		this = tn_last(&ver_root);

		while (this) {
			struct jffs2_tmp_dnode_info *vers_next;
			int ret;
			vers_next = tn_prev(this);
			eat_last(&ver_root, &this->rb);
			if (check_tn_node(c, this)) {
				/* Bad CRC: drop this version and fall through to older ones */
				dbg_readinode("node ver %d, 0x%x-0x%x failed CRC\n",
					     this->version, this->fn->ofs,
					     this->fn->ofs+this->fn->size);
				jffs2_kill_tn(c, this);
			} else {
				if (this->version > high_ver) {
					/* Note that this is different from the other
					   highest_version, because this one is only
					   counting _valid_ nodes which could give the
					   latest inode metadata */
					high_ver = this->version;
					rii->latest_ref = this->fn->raw;
				}
				dbg_readinode("Add %p (v %d, 0x%x-0x%x, ov %d) to fragtree\n",
					     this, this->version, this->fn->ofs,
					     this->fn->ofs+this->fn->size, this->overlapped);

				ret = jffs2_add_full_dnode_to_inode(c, f, this->fn);
				if (ret) {
					/* Free the nodes in vers_root; let the caller
					   deal with the rest */
					JFFS2_ERROR("Add node to tree failed %d\n", ret);
					while (1) {
						vers_next = tn_prev(this);
						if (check_tn_node(c, this))
							jffs2_mark_node_obsolete(c, this->fn->raw);
						jffs2_free_full_dnode(this->fn);
						jffs2_free_tmp_dnode_info(this);
						this = vers_next;
						if (!this)
							break;
						eat_last(&ver_root, &vers_next->rb);
					}
					return ret;
				}
				/* fn ownership passed to the fragtree; free only the wrapper */
				jffs2_free_tmp_dnode_info(this);
			}
			this = vers_next;
		}
	}
	return 0;
}


/*
 * cregit contributor statistics:
 *   david woodhouse     427  89.89%  4  44.44%
 *   artem bityutskiy     24   5.05%  3  33.33%
 *   thomas gleixner      13   2.74%  1  11.11%
 *   linus torvalds       11   2.32%  1  11.11%
 */

/*
 * Release every tmp_dnode_info (and its full_dnode) in 'list', leaving
 * the caller with a valid empty tree.
 */
static void jffs2_free_tmp_dnode_info_list(struct rb_root *list)
{
	struct jffs2_tmp_dnode_info *cur, *tmp;

	/* Post-order walk is safe against freeing the node being visited */
	rbtree_postorder_for_each_entry_safe(cur, tmp, list, rb) {
		jffs2_free_full_dnode(cur->fn);
		jffs2_free_tmp_dnode_info(cur);
	}

	*list = RB_ROOT;
}


/*
 * cregit contributor statistics:
 *   artem bityutskiy      21  43.75%  1  20.00%
 *   david woodhouse       15  31.25%  2  40.00%
 *   cody p schafer        10  20.83%  1  20.00%
 *   venkatesh pallipadi    2   4.17%  1  20.00%
 */

/*
 * Free a singly linked list of full_dirent structures, saving each
 * successor pointer before its node is released.
 */
static void jffs2_free_full_dirent_list(struct jffs2_full_dirent *fd)
{
	while (fd) {
		struct jffs2_full_dirent *succ = fd->next;

		jffs2_free_full_dirent(fd);
		fd = succ;
	}
}


/*
 * cregit contributor statistics:
 *   artem bityutskiy    25  67.57%  1  33.33%
 *   linus torvalds      11  29.73%  1  33.33%
 *   david woodhouse      1   2.70%  1  33.33%
 */

/* Returns first valid node after 'ref'. May return 'ref' */
/*
 * Walk the next_in_ino chain starting at 'ref' and return the first
 * non-obsolete entry (possibly 'ref' itself).  Note the loop condition
 * also requires ref->next_in_ino to be non-NULL, mirroring the original;
 * returns NULL when no such node is found.
 */
static struct jffs2_raw_node_ref *jffs2_first_valid_node(struct jffs2_raw_node_ref *ref)
{
	for (; ref && ref->next_in_ino; ref = ref->next_in_ino) {
		if (!ref_obsolete(ref))
			return ref;

		dbg_noderef("node at 0x%08x is obsoleted. Ignoring.\n",
			    ref_offset(ref));
	}

	return NULL;
}


/*
 * cregit contributor statistics:
 *   artem bityutskiy    40  75.47%  2  50.00%
 *   david woodhouse     11  20.75%  1  25.00%
 *   linus torvalds       2   3.77%  1  25.00%
 */

/* * Helper function for jffs2_get_inode_nodes(). * It is called every time an directory entry node is found. * * Returns: 0 on success; * negative error code on failure. */
static inline int read_direntry(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref, struct jffs2_raw_dirent *rd, size_t read, struct jffs2_readinode_info *rii) { struct jffs2_full_dirent *fd; uint32_t crc; /* Obsoleted. This cannot happen, surely? dwmw2 20020308 */ BUG_ON(ref_obsolete(ref)); crc = crc32(0, rd, sizeof(*rd) - 8); if (unlikely(crc != je32_to_cpu(rd->node_crc))) { JFFS2_NOTICE("header CRC failed on dirent node at %#08x: read %#08x, calculated %#08x\n", ref_offset(ref), je32_to_cpu(rd->node_crc), crc); jffs2_mark_node_obsolete(c, ref); return 0; } /* If we've never checked the CRCs on this node, check them now */ if (ref_flags(ref) == REF_UNCHECKED) { struct jffs2_eraseblock *jeb; int len; /* Sanity check */ if (unlikely(PAD((rd->nsize + sizeof(*rd))) != PAD(je32_to_cpu(rd->totlen)))) { JFFS2_ERROR("illegal nsize in node at %#08x: nsize %#02x, totlen %#04x\n", ref_offset(ref), rd->nsize, je32_to_cpu(rd->totlen)); jffs2_mark_node_obsolete(c, ref); return 0; } jeb = &c->blocks[ref->flash_offset / c->sector_size]; len = ref_totlen(c, jeb, ref); spin_lock(&c->erase_completion_lock); jeb->used_size += len; jeb->unchecked_size -= len; c->used_size += len; c->unchecked_size -= len; ref->flash_offset = ref_offset(ref)