+
+ if (dev->gc_dirtiest > 0 && dev->gc_pages_in_use <= threshold)
+ selected = dev->gc_dirtiest;
+ }
+
+ /*
+ * If nothing has been selected for a while, try the oldest dirty block,
+ * since that is the one gumming up the works.
+ */
+
+ if (!selected && dev->param.is_yaffs2 &&
+ dev->gc_not_done >= (background ? 10 : 20)) {
+ yaffs2_find_oldest_dirty_seq(dev);
+ if (dev->oldest_dirty_block > 0) {
+ selected = dev->oldest_dirty_block;
+ dev->gc_dirtiest = selected;
+ dev->oldest_dirty_gc_count++;
+ bi = yaffs_get_block_info(dev, selected);
+ dev->gc_pages_in_use =
+ bi->pages_in_use - bi->soft_del_pages;
+ } else {
+ dev->gc_not_done = 0;
+ }
+ }
+
+ if (selected) {
+ yaffs_trace(YAFFS_TRACE_GC,
+ "GC Selected block %d with %d free, prioritised:%d",
+ selected,
+ dev->param.chunks_per_block - dev->gc_pages_in_use,
+ prioritised);
+
+ dev->n_gc_blocks++;
+ if (background)
+ dev->bg_gcs++;
+
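+ /* A block was chosen: reset the selection state for the next search. */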
+ dev->gc_dirtiest = 0;
+ dev->gc_pages_in_use = 0;
+ dev->gc_not_done = 0;
+ if (dev->refresh_skip > 0)
+ dev->refresh_skip--;
+ } else {
+ dev->gc_not_done++;
+ yaffs_trace(YAFFS_TRACE_GC,
+ "GC none: finder %d skip %d threshold %d dirtiest %d using %d oldest %d%s",
+ dev->gc_block_finder, dev->gc_not_done, threshold,
+ dev->gc_dirtiest, dev->gc_pages_in_use,
+ dev->oldest_dirty_block, background ? " bg" : "");
+ }
+
+ return selected;
+}
+
+/* New garbage collector
+ * If we're very low on erased blocks then we do aggressive garbage collection,
+ * otherwise we do "leisurely" garbage collection.
+ * Aggressive gc looks further (the whole array) and will accept blocks that
+ * are less dirty.
+ * Passive gc only inspects smaller areas and will only accept blocks that are
+ * mostly dirty.
+ *
+ * The idea is to help clear out space in a more spread-out manner.
+ * Whether it really does anything useful is uncertain.
+ */
+static int yaffs_check_gc(struct yaffs_dev *dev, int background)
+{
+ int aggressive = 0;
+ int gc_ok = YAFFS_OK;
+ int max_tries = 0;
+ int min_erased;
+ int erased_chunks;
+ int checkpt_block_adjust;
+
+ if (dev->param.gc_control && (dev->param.gc_control(dev) & 1) == 0)
+ return YAFFS_OK;
+
+ if (dev->gc_disable) {
+ /* Bail out so we don't get recursive gc */
+ return YAFFS_OK;
+ }
+
+ /* This loop should normally run only once.
+ * We'll only loop here if garbage collection fails to free up space.
+ */
+
+ do {
+ max_tries++;
+
+ checkpt_block_adjust = yaffs_calc_checkpt_blocks_required(dev);
+
+ min_erased =
+ dev->param.n_reserved_blocks + checkpt_block_adjust + 1;
+ erased_chunks =
+ dev->n_erased_blocks * dev->param.chunks_per_block;
+
+ /* If we need a block soon then do aggressive gc. */
+ if (dev->n_erased_blocks < min_erased)
+ aggressive = 1;
+ else {
+ if (!background
+ && erased_chunks > (dev->n_free_chunks / 4))
+ break;
+
+ if (dev->gc_skip > 20)
+ dev->gc_skip = 20;
+ if (erased_chunks < dev->n_free_chunks / 2 ||
+ dev->gc_skip < 1 || background)
+ aggressive = 0;
+ else {
+ dev->gc_skip--;
+ break;
+ }
+ }
+
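+ /* Going ahead with a gc pass, so reset the leisurely-gc skip count. */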
+ dev->gc_skip = 5;
+
+ /* If we don't already have a block being gc'd then see if we should start another */
+
+ if (dev->gc_block < 1 && !aggressive) {
+ dev->gc_block = yaffs2_find_refresh_block(dev);
+ dev->gc_chunk = 0;
+ dev->n_clean_ups = 0;
+ }
+ if (dev->gc_block < 1) {
+ dev->gc_block =
+ yaffs_find_gc_block(dev, aggressive, background);
+ dev->gc_chunk = 0;
+ dev->n_clean_ups = 0;
+ }
+
+ if (dev->gc_block > 0) {
+ dev->all_gcs++;
+ if (!aggressive)
+ dev->passive_gc_count++;
+
+ yaffs_trace(YAFFS_TRACE_GC,
+ "yaffs: GC n_erased_blocks %d aggressive %d",
+ dev->n_erased_blocks, aggressive);
+
+ gc_ok = yaffs_gc_block(dev, dev->gc_block, aggressive);
+ }
+
+ if (dev->n_erased_blocks < (dev->param.n_reserved_blocks)
+ && dev->gc_block > 0) {
+ yaffs_trace(YAFFS_TRACE_GC,
+ "yaffs: GC !!!no reclaim!!! n_erased_blocks %d after try %d block %d",
+ dev->n_erased_blocks, max_tries,
+ dev->gc_block);
+ }
+ } while ((dev->n_erased_blocks < dev->param.n_reserved_blocks) &&
+ (dev->gc_block > 0) && (max_tries < 2));
+
+ return aggressive ? gc_ok : YAFFS_OK;
+}
+
+/*
+ * yaffs_bg_gc()
+ * Garbage collects. Intended to be called from a background thread.
+ * Returns non-zero if more than half the free chunks are erased.
+ */
+int yaffs_bg_gc(struct yaffs_dev *dev, unsigned urgency)
+{
+ int erased_chunks = dev->n_erased_blocks * dev->param.chunks_per_block;
+
+ yaffs_trace(YAFFS_TRACE_BACKGROUND, "Background gc %u", urgency);
+
+ yaffs_check_gc(dev, 1);
+ return erased_chunks > dev->n_free_chunks / 2;
+}
+
+/*-------------------- Data file manipulation -----------------*/
+
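+/*
+ * Read the data chunk at the given chunk position in a file.
+ * If the chunk is a hole (never written), return a zero-filled buffer.
+ */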
+static int yaffs_rd_data_obj(struct yaffs_obj *in, int inode_chunk, u8 *buffer)
+{
+ int nand_chunk = yaffs_find_chunk_in_file(in, inode_chunk, NULL);
+
+ if (nand_chunk >= 0)
+ return yaffs_rd_chunk_tags_nand(in->my_dev, nand_chunk,
+ buffer, NULL);
+ else {
+ yaffs_trace(YAFFS_TRACE_NANDACCESS,
+ "Chunk %d not found zero instead",
+ nand_chunk);
+ /* get sane (zero) data if you read a hole */
+ memset(buffer, 0, in->my_dev->data_bytes_per_chunk);
+ return 0;
+ }
+
+}
+
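+/*
+ * Delete a chunk: update the block accounting and, for yaffs1 with
+ * mark_flash set, write deletion marker tags back to NAND.
+ * If this makes the whole block dirty, the block is kicked off for erasure.
+ */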
+void yaffs_chunk_del(struct yaffs_dev *dev, int chunk_id, int mark_flash,
+ int lyn)
+{
+ int block;
+ int page;
+ struct yaffs_ext_tags tags;
+ struct yaffs_block_info *bi;
+
+ if (chunk_id <= 0)
+ return;
+
+ dev->n_deletions++;
+ block = chunk_id / dev->param.chunks_per_block;
+ page = chunk_id % dev->param.chunks_per_block;
+
+ if (!yaffs_check_chunk_bit(dev, block, page))
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Deleting invalid chunk %d", chunk_id);
+
+ bi = yaffs_get_block_info(dev, block);
+
+ yaffs2_update_oldest_dirty_seq(dev, block, bi);
+
+ yaffs_trace(YAFFS_TRACE_DELETION,
+ "line %d delete of chunk %d",
+ lyn, chunk_id);
+
+ if (!dev->param.is_yaffs2 && mark_flash &&
+ bi->block_state != YAFFS_BLOCK_STATE_COLLECTING) {
+
+ yaffs_init_tags(&tags);
+
+ tags.is_deleted = 1;
+
+ yaffs_wr_chunk_tags_nand(dev, chunk_id, NULL, &tags);
+ yaffs_handle_chunk_update(dev, chunk_id, &tags);
+ } else {
+ dev->n_unmarked_deletions++;
+ }
+
+ /* Pull out of the management area.
+ * If the whole block became dirty, this will kick off an erasure.
+ */
+ if (bi->block_state == YAFFS_BLOCK_STATE_ALLOCATING ||
+ bi->block_state == YAFFS_BLOCK_STATE_FULL ||
+ bi->block_state == YAFFS_BLOCK_STATE_NEEDS_SCANNING ||
+ bi->block_state == YAFFS_BLOCK_STATE_COLLECTING) {
+ dev->n_free_chunks++;
+
+ yaffs_clear_chunk_bit(dev, block, page);
+
+ bi->pages_in_use--;
+
+ if (bi->pages_in_use == 0 &&
+ !bi->has_shrink_hdr &&
+ bi->block_state != YAFFS_BLOCK_STATE_ALLOCATING &&
+ bi->block_state != YAFFS_BLOCK_STATE_NEEDS_SCANNING) {
+ yaffs_block_became_dirty(dev, block);
+ }
+
+ }
+
+}
+
+static int yaffs_wr_data_obj(struct yaffs_obj *in, int inode_chunk,
+ const u8 *buffer, int n_bytes, int use_reserve)
+{
+ /* Find the old chunk (needed to get the serial number),
+ * write the new one and patch it into the tree, then
+ * invalidate the old tags.
+ */
+
+ int prev_chunk_id;
+ struct yaffs_ext_tags prev_tags;
+
+ int new_chunk_id;
+ struct yaffs_ext_tags new_tags;
+
+ struct yaffs_dev *dev = in->my_dev;
+
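+ /* A write may consume free space, so check whether gc is needed first. */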
+ yaffs_check_gc(dev, 0);
+
+ /* Get the previous chunk at this location in the file if it exists.
+ * If it does not exist then put a zero into the tree. This creates
+ * the tnode now, rather than later when it is harder to clean up.
+ */
+ prev_chunk_id = yaffs_find_chunk_in_file(in, inode_chunk, &prev_tags);
+ if (prev_chunk_id < 1 &&
+ !yaffs_put_chunk_in_file(in, inode_chunk, 0, 0))
+ return 0;
+
+ /* Set up new tags */
+ yaffs_init_tags(&new_tags);
+
+ new_tags.chunk_id = inode_chunk;
+ new_tags.obj_id = in->obj_id;
+ new_tags.serial_number =
+ (prev_chunk_id > 0) ? prev_tags.serial_number + 1 : 1;
+ new_tags.n_bytes = n_bytes;
+
+ if (n_bytes < 1 || n_bytes > dev->param.total_bytes_per_chunk) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "Writing %d bytes to chunk!!!!!!!!!",
+ n_bytes);
+ YBUG();
+ }
+
+ new_chunk_id =
+ yaffs_write_new_chunk(dev, buffer, &new_tags, use_reserve);
+
+ if (new_chunk_id > 0) {
+ yaffs_put_chunk_in_file(in, inode_chunk, new_chunk_id, 0);
+
+ if (prev_chunk_id > 0)
+ yaffs_chunk_del(dev, prev_chunk_id, 1, __LINE__);
+
+ yaffs_verify_file_sane(in);