*/
const char *yaffs_guts_c_version =
- "$Id: yaffs_guts.c,v 1.98 2009-12-07 01:17:33 charles Exp $";
+ "$Id: yaffs_guts.c,v 1.115 2010-03-07 23:43:34 charles Exp $";
#include "yportenv.h"
+#include "yaffs_trace.h"
#include "yaffsinterface.h"
#include "yaffs_guts.h"
yaffs_FileStructure *fStruct,
__u32 chunkId);
+static void yaffs_SkipRestOfBlock(yaffs_Device *dev);
+static int yaffs_VerifyChunkWritten(yaffs_Device *dev,
+ int chunkInNAND,
+ const __u8 *data,
+ yaffs_ExtendedTags *tags);
+
/* Function to calculate chunk and offset */
static void yaffs_AddrToChunk(yaffs_Device *dev, loff_t addr, int *chunkOut,
for (i = 0; buf && i < YAFFS_N_TEMP_BUFFERS; i++) {
dev->tempBuffer[i].line = 0; /* not in use */
dev->tempBuffer[i].buffer = buf =
- YMALLOC_DMA(dev->totalBytesPerChunk);
+ YMALLOC_DMA(dev->param.totalBytesPerChunk);
}
return buf ? YAFFS_OK : YAFFS_FAIL;
return 1;
}
- for (i = 0; i < dev->nShortOpCaches; i++) {
+ for (i = 0; i < dev->param.nShortOpCaches; i++) {
if (dev->srCache[i].data == buffer)
return 1;
}
static Y_INLINE void yaffs_VerifyChunkBitId(yaffs_Device *dev, int blk, int chunk)
{
if (blk < dev->internalStartBlock || blk > dev->internalEndBlock ||
- chunk < 0 || chunk >= dev->nChunksPerBlock) {
+ chunk < 0 || chunk >= dev->param.nChunksPerBlock) {
T(YAFFS_TRACE_ERROR,
(TSTR("**>> yaffs: Chunk Id (%d:%d) invalid"TENDSTR),
blk, chunk));
static int yaffs_SkipVerification(yaffs_Device *dev)
{
+	dev=dev;	/* self-assignment silences the unused-parameter warning */
	return !(yaffs_traceMask & (YAFFS_TRACE_VERIFY | YAFFS_TRACE_VERIFY_FULL));
}
static int yaffs_SkipFullVerification(yaffs_Device *dev)
{
+	dev=dev;	/* self-assignment silences the unused-parameter warning */
	return !(yaffs_traceMask & (YAFFS_TRACE_VERIFY_FULL));
}
static int yaffs_SkipNANDVerification(yaffs_Device *dev)
{
+	dev=dev;	/* self-assignment silences the unused-parameter warning */
	return !(yaffs_traceMask & (YAFFS_TRACE_VERIFY_NAND));
}
actuallyUsed = bi->pagesInUse - bi->softDeletions;
- if (bi->pagesInUse < 0 || bi->pagesInUse > dev->nChunksPerBlock ||
- bi->softDeletions < 0 || bi->softDeletions > dev->nChunksPerBlock ||
- actuallyUsed < 0 || actuallyUsed > dev->nChunksPerBlock)
+ if (bi->pagesInUse < 0 || bi->pagesInUse > dev->param.nChunksPerBlock ||
+ bi->softDeletions < 0 || bi->softDeletions > dev->param.nChunksPerBlock ||
+ actuallyUsed < 0 || actuallyUsed > dev->param.nChunksPerBlock)
T(YAFFS_TRACE_VERIFY, (TSTR("Block %d has illegal values pagesInUsed %d softDeletions %d"TENDSTR),
n, bi->pagesInUse, bi->softDeletions));
/* Check that the sequence number is valid.
* Ten million is legal, but is very unlikely
*/
- if (dev->isYaffs2 &&
+ if (dev->param.isYaffs2 &&
(bi->blockState == YAFFS_BLOCK_STATE_ALLOCATING || bi->blockState == YAFFS_BLOCK_STATE_FULL) &&
(bi->sequenceNumber < YAFFS_LOWEST_SEQUENCE_NUMBER || bi->sequenceNumber > 10000000))
T(YAFFS_TRACE_VERIFY, (TSTR("Block %d has suspect sequence number of %d"TENDSTR),
if (!(tags && obj && oh)) {
T(YAFFS_TRACE_VERIFY,
- (TSTR("Verifying object header tags %x obj %x oh %x"TENDSTR),
- (__u32)tags, (__u32)obj, (__u32)oh));
+ (TSTR("Verifying object header tags %p obj %p oh %p"TENDSTR),
+ tags, obj, oh));
return;
}
actualTallness = obj->variant.fileVariant.topLevel;
- if (requiredTallness > actualTallness)
- T(YAFFS_TRACE_VERIFY,
- (TSTR("Obj %d had tnode tallness %d, needs to be %d"TENDSTR),
- obj->objectId, actualTallness, requiredTallness));
-
-
/* Check that the chunks in the tnode tree are all correct.
* We do this by scanning through the tnode tree and
* checking the tags for every chunk match.
/* Check sane object header chunk */
- chunkMin = dev->internalStartBlock * dev->nChunksPerBlock;
- chunkMax = (dev->internalEndBlock+1) * dev->nChunksPerBlock - 1;
+ chunkMin = dev->internalStartBlock * dev->param.nChunksPerBlock;
+ chunkMax = (dev->internalEndBlock+1) * dev->param.nChunksPerBlock - 1;
chunkInRange = (((unsigned)(obj->hdrChunk)) >= chunkMin && ((unsigned)(obj->hdrChunk)) <= chunkMax);
chunkIdOk = chunkInRange || (obj->hdrChunk == 0);
chunkValid = chunkInRange &&
yaffs_CheckChunkBit(dev,
- obj->hdrChunk / dev->nChunksPerBlock,
- obj->hdrChunk % dev->nChunksPerBlock);
+ obj->hdrChunk / dev->param.nChunksPerBlock,
+ obj->hdrChunk % dev->param.nChunksPerBlock);
chunkShouldNotBeDeleted = chunkInRange && !chunkValid;
if (!obj->fake &&
}
+
+/*
+ * yaffs_VerifyChunkWritten()
+ * Read back a freshly-written chunk and compare both the data and the
+ * significant tag fields (objectId, chunkId, byteCount) against what was
+ * written. Returns YAFFS_OK if the chunk verifies, YAFFS_FAIL otherwise.
+ */
+static int yaffs_VerifyChunkWritten(yaffs_Device *dev,
+					int chunkInNAND,
+					const __u8 *data,
+					yaffs_ExtendedTags *tags)
+{
+	int retval = YAFFS_OK;
+	yaffs_ExtendedTags tempTags;
+	__u8 *buffer = yaffs_GetTempBuffer(dev,__LINE__);
+	int result;
+
+	result = yaffs_ReadChunkWithTagsFromNAND(dev,chunkInNAND,buffer,&tempTags);
+	/* A failed read-back must fail verification: previously 'result' was
+	 * ignored, so a read error could leave buffer/tempTags undefined and
+	 * the chunk could still be reported as correctly written.
+	 */
+	if(result != YAFFS_OK ||
+	   memcmp(buffer,data,dev->nDataBytesPerChunk) ||
+	   tempTags.objectId != tags->objectId ||
+	   tempTags.chunkId != tags->chunkId ||
+	   tempTags.byteCount != tags->byteCount)
+		retval = YAFFS_FAIL;
+
+	yaffs_ReleaseTempBuffer(dev, buffer, __LINE__);
+
+	return retval;
+}
+
static int yaffs_WriteNewChunkWithTagsToNAND(struct yaffs_DeviceStruct *dev,
const __u8 *data,
yaffs_ExtendedTags *tags,
* chunk due to power loss. This checking policy should
* catch that case with very few checks and thus save a
* lot of checks that are most likely not needed.
+ *
+ * Mods to the above
+ * If an erase check fails or the write fails we skip the
+ * rest of the block.
*/
- if (bi->gcPrioritise) {
- yaffs_DeleteChunk(dev, chunk, 1, __LINE__);
- /* try another chunk */
- continue;
- }
/* let's give it a try */
attempts++;
(TSTR("**>> yaffs chunk %d was not erased"
TENDSTR), chunk));
- /* try another chunk */
+ /* If not erased, delete this one,
+ * skip rest of block and
+ * try another chunk */
+ yaffs_DeleteChunk(dev,chunk,1,__LINE__);
+ yaffs_SkipRestOfBlock(dev);
continue;
}
- bi->skipErasedCheck = 1;
}
writeOk = yaffs_WriteChunkWithTagsToNAND(dev, chunk,
data, tags);
+
+ if(!bi->skipErasedCheck)
+ writeOk = yaffs_VerifyChunkWritten(dev, chunk, data, tags);
+
if (writeOk != YAFFS_OK) {
+ /* Clean up aborted write, skip to next block and
+ * try another chunk */
yaffs_HandleWriteChunkError(dev, chunk, erasedOk);
- /* try another chunk */
continue;
}
+ bi->skipErasedCheck = 1;
+
/* Copy the data into the robustification buffer */
yaffs_HandleWriteChunkOk(dev, chunk, data, tags);
return chunk;
}
+
+/*
+ * Oldest Dirty Sequence Number handling.
+ */
+
+/* yaffs_CalcOldestDirtySequence()
+ * yaffs_FindOldestDirtySequence()
+ * Calculate the oldest dirty sequence number if we don't know it.
+ */
+static int yaffs_CalcOldestDirtySequence(yaffs_Device *dev)
+{
+	int i;
+	__u32 seq;
+	yaffs_BlockInfo *b = 0;
+
+	/* Dirty-sequence tracking only applies to yaffs2. */
+	if(!dev->param.isYaffs2)
+		return 0;
+
+	/* Find the oldest dirty sequence number.
+	 * Start from the device's current sequence number and scan every
+	 * block; a FULL block with fewer live pages than the block holds
+	 * (i.e. with some deleted/soft-deleted chunks) is "dirty".
+	 */
+	seq = dev->sequenceNumber;
+	for (i = dev->internalStartBlock; i <= dev->internalEndBlock; i++) {
+		b = yaffs_GetBlockInfo(dev, i);
+		if (b->blockState == YAFFS_BLOCK_STATE_FULL &&
+			(b->pagesInUse - b->softDeletions) < dev->param.nChunksPerBlock &&
+			b->sequenceNumber < seq)
+			seq = b->sequenceNumber;
+	}
+	return seq;
+}
+
+
+static void yaffs_FindOldestDirtySequence(yaffs_Device *dev)
+{
+	/* Lazily (re)compute the cached oldest dirty sequence number.
+	 * A value of 0 means "unknown" and forces the full scan.
+	 */
+	if(dev->param.isYaffs2 && !dev->oldestDirtySequence)
+		dev->oldestDirtySequence =
+			yaffs_CalcOldestDirtySequence(dev);
+
+#if 0
+	/* Debug-only consistency check: verify the cached value against a
+	 * fresh recomputation. Disabled because the full scan is expensive.
+	 */
+	if(!yaffs_SkipVerification(dev) &&
+	   dev->oldestDirtySequence != yaffs_CalcOldestDirtySequence(dev))
+		YBUG();
+
+#endif
+}
+
+/*
+ * yaffs_ClearOldestDirtySequence()
+ * Called when a block is erased or marked bad. (ie. when its sequenceNumber
+ * becomes invalid). If the value matches the oldest then we clear
+ * dev->oldestDirtySequence to force its recomputation.
+ */
+static void yaffs_ClearOldestDirtySequence(yaffs_Device *dev, yaffs_BlockInfo *bi)
+{
+
+	if(!dev->param.isYaffs2)
+		return;
+
+	/* Passing bi == NULL clears unconditionally; otherwise only clear
+	 * (forcing a recompute) when this block held the oldest sequence.
+	 */
+	if(!bi || bi->sequenceNumber == dev->oldestDirtySequence)
+		dev->oldestDirtySequence = 0;
+}
+
+/*
+ * yaffs_UpdateOldestDirtySequence()
+ * Update the oldest dirty sequence number whenever we dirty a block.
+ * Only do this if the oldestDirtySequence is actually being tracked.
+ */
+static void yaffs_UpdateOldestDirtySequence(yaffs_Device *dev, yaffs_BlockInfo *bi)
+{
+	/* Only lower the cached value when it is actually being tracked
+	 * (non-zero); 0 means "unknown" and will be recomputed on demand.
+	 */
+	if(dev->param.isYaffs2 && dev->oldestDirtySequence){
+		if(dev->oldestDirtySequence > bi->sequenceNumber)
+			dev->oldestDirtySequence = bi->sequenceNumber;
+	}
+}
+
/*
* Block retiring for handling a broken block.
*/
yaffs_BlockInfo *bi = yaffs_GetBlockInfo(dev, blockInNAND);
yaffs_InvalidateCheckpoint(dev);
+
+ yaffs_ClearOldestDirtySequence(dev,bi);
if (yaffs_MarkBlockBad(dev, blockInNAND) != YAFFS_OK) {
if (yaffs_EraseBlockInNAND(dev, blockInNAND) != YAFFS_OK) {
TENDSTR), blockInNAND));
} else {
yaffs_ExtendedTags tags;
- int chunkId = blockInNAND * dev->nChunksPerBlock;
+ int chunkId = blockInNAND * dev->param.nChunksPerBlock;
__u8 *buffer = yaffs_GetTempBuffer(dev, __LINE__);
memset(buffer, 0xff, dev->nDataBytesPerChunk);
yaffs_InitialiseTags(&tags);
tags.sequenceNumber = YAFFS_SEQUENCE_BAD_BLOCK;
- if (dev->writeChunkWithTagsToNAND(dev, chunkId -
+ if (dev->param.writeChunkWithTagsToNAND(dev, chunkId -
dev->chunkOffset, buffer, &tags) != YAFFS_OK)
T(YAFFS_TRACE_ALWAYS, (TSTR("yaffs: Failed to "
TCONT("write bad block marker to block %d")
const __u8 *data,
const yaffs_ExtendedTags *tags)
{
+ dev=dev;
+ chunkInNAND=chunkInNAND;
+ data=data;
+ tags=tags;
}
static void yaffs_HandleUpdateChunk(yaffs_Device *dev, int chunkInNAND,
const yaffs_ExtendedTags *tags)
{
+ dev=dev;
+ chunkInNAND=chunkInNAND;
+ tags=tags;
}
void yaffs_HandleChunkError(yaffs_Device *dev, yaffs_BlockInfo *bi)
static void yaffs_HandleWriteChunkError(yaffs_Device *dev, int chunkInNAND,
int erasedOk)
{
- int blockInNAND = chunkInNAND / dev->nChunksPerBlock;
+ int blockInNAND = chunkInNAND / dev->param.nChunksPerBlock;
yaffs_BlockInfo *bi = yaffs_GetBlockInfo(dev, blockInNAND);
yaffs_HandleChunkError(dev, bi);
/* Delete the chunk */
yaffs_DeleteChunk(dev, chunkInNAND, 1, __LINE__);
+ yaffs_SkipRestOfBlock(dev);
}
* adds them to the tnode free list.
* Don't use this function directly
*/
+/*
+ * yaffs_CalcTnodeSize()
+ * Calculate the tnode size in bytes for variable width tnode support.
+ * Must be a multiple of 32-bits and at least sizeof(yaffs_Tnode) so the
+ * free-list linkage fits.
+ */
+static Y_INLINE int yaffs_CalcTnodeSize(yaffs_Device *dev)
+{
+	int tnodeSize = (dev->tnodeWidth * YAFFS_NTNODES_LEVEL0)/8;
+
+	/* Cast avoids an implicit signed/unsigned comparison: sizeof() is
+	 * size_t, which would promote tnodeSize to unsigned.
+	 */
+	if (tnodeSize < (int)sizeof(yaffs_Tnode))
+		tnodeSize = sizeof(yaffs_Tnode);
+	return tnodeSize;
+}
static int yaffs_CreateTnodes(yaffs_Device *dev, int nTnodes)
{
int i;
- int tnodeSize;
+ int tnodeSize = yaffs_CalcTnodeSize(dev);
yaffs_Tnode *newTnodes;
__u8 *mem;
yaffs_Tnode *curr;
if (nTnodes < 1)
return YAFFS_OK;
- /* Calculate the tnode size in bytes for variable width tnode support.
- * Must be a multiple of 32-bits */
- tnodeSize = (dev->tnodeWidth * YAFFS_NTNODES_LEVEL0)/8;
-
- if (tnodeSize < sizeof(yaffs_Tnode))
- tnodeSize = sizeof(yaffs_Tnode);
/* make these things */
{
yaffs_Tnode *tn = NULL;
+#ifdef CONFIG_YAFFS_VALGRIND_TEST
+ tn = YMALLOC(yaffs_CalcTnodeSize(dev));
+ if(tn)
+ dev->nTnodesCreated++;
+#else
/* If there are none left make more */
if (!dev->freeTnodes)
yaffs_CreateTnodes(dev, YAFFS_ALLOCATION_NTNODES);
dev->freeTnodes = dev->freeTnodes->internal[0];
dev->nFreeTnodes--;
}
-
+#endif
dev->nCheckpointBlocksRequired = 0; /* force recalculation*/
return tn;
static yaffs_Tnode *yaffs_GetTnode(yaffs_Device *dev)
{
yaffs_Tnode *tn = yaffs_GetTnodeRaw(dev);
- int tnodeSize = (dev->tnodeWidth * YAFFS_NTNODES_LEVEL0)/8;
-
- if (tnodeSize < sizeof(yaffs_Tnode))
- tnodeSize = sizeof(yaffs_Tnode);
+ int tnodeSize = yaffs_CalcTnodeSize(dev);
if (tn)
memset(tn, 0, tnodeSize);
static void yaffs_FreeTnode(yaffs_Device *dev, yaffs_Tnode *tn)
{
if (tn) {
+#ifdef CONFIG_YAFFS_VALGRIND_TEST
+ YFREE(tn);
+ dev->nTnodesCreated--;
+#else
#ifdef CONFIG_YAFFS_TNODE_LIST_DEBUG
if (tn->internal[YAFFS_NTNODES_INTERNAL] != 0) {
/* Hoosterman, this thing looks like it is already in the list */
tn->internal[0] = dev->freeTnodes;
dev->freeTnodes = tn;
dev->nFreeTnodes++;
+#endif
}
dev->nCheckpointBlocksRequired = 0; /* force recalculation*/
}
dev->freeTnodes = NULL;
dev->nFreeTnodes = 0;
+ dev->nTnodesCreated = 0;
}
static void yaffs_InitialiseTnodes(yaffs_Device *dev)
int requiredTallness;
int level = fStruct->topLevel;
+ dev=dev;
+
/* Check sane level and chunk Id */
if (level < 0 || level > YAFFS_TNODES_MAX_LEVEL)
return NULL;
tn->internal[x] = yaffs_GetTnode(dev);
if(!tn->internal[x])
return NULL;
-
} else if (l == 1) {
/* Looking from level 1 at level 0 */
if (passedTn) {
int j;
for (j = 0; theChunk && j < dev->chunkGroupSize; j++) {
- if (yaffs_CheckChunkBit(dev, theChunk / dev->nChunksPerBlock,
- theChunk % dev->nChunksPerBlock)) {
+ if (yaffs_CheckChunkBit(dev, theChunk / dev->param.nChunksPerBlock,
+ theChunk % dev->param.nChunksPerBlock)) {
if(dev->chunkGroupSize == 1)
return theChunk;
T(YAFFS_TRACE_DELETION, (TSTR("soft delete chunk %d" TENDSTR), chunk));
- theBlock = yaffs_GetBlockInfo(dev, chunk / dev->nChunksPerBlock);
+ theBlock = yaffs_GetBlockInfo(dev, chunk / dev->param.nChunksPerBlock);
if (theBlock) {
theBlock->softDeletions++;
dev->nFreeChunks++;
+ yaffs_UpdateOldestDirtySequence(dev,theBlock);
}
}
* level 0 tnode entries must be zeroed out.
* Could also use this for file deletion, but that's probably better handled
* by a special case.
+ *
+ * This function is recursive. For levels > 0 the function is called again on
+ * any sub-tree. For level == 0 we just check if the sub-tree has data.
+ * If there is no data in a subtree then it is pruned.
*/
static yaffs_Tnode *yaffs_PruneWorker(yaffs_Device *dev, yaffs_Tnode *tn,
if (tn) {
hasData = 0;
- for (i = 0; i < YAFFS_NTNODES_INTERNAL; i++) {
- if (tn->internal[i] && level > 0) {
- tn->internal[i] =
- yaffs_PruneWorker(dev, tn->internal[i],
- level - 1,
- (i == 0) ? del0 : 1);
+ if(level > 0){
+ for (i = 0; i < YAFFS_NTNODES_INTERNAL; i++) {
+ if (tn->internal[i]) {
+ tn->internal[i] =
+ yaffs_PruneWorker(dev, tn->internal[i],
+ level - 1,
+ (i == 0) ? del0 : 1);
+ }
+
+ if (tn->internal[i])
+ hasData++;
}
+ } else {
+ int tnodeSize_u32 = yaffs_CalcTnodeSize(dev)/sizeof(__u32);
+ __u32 *map = (__u32 *)tn;
- if (tn->internal[i])
- hasData++;
- }
+ for(i = 0; !hasData && i < tnodeSize_u32; i++){
+ if(map[i])
+ hasData++;
+ }
+ }
if (hasData == 0 && del0) {
/* Free and return NULL */
{
yaffs_Object *tn = NULL;
-#ifdef VALGRIND_TEST
+#ifdef CONFIG_YAFFS_VALGRIND_TEST
tn = YMALLOC(sizeof(yaffs_Object));
+ if(tn)
+ dev->nObjectsCreated++;
#else
/* If there are none left make more */
if (!dev->freeObjects)
{
yaffs_Device *dev = tn->myDev;
-#ifdef __KERNEL__
T(YAFFS_TRACE_OS, (TSTR("FreeObject %p inode %p"TENDSTR), tn, tn->myInode));
-#endif
+ if (!tn)
+ YBUG();
if (tn->parent)
YBUG();
if (!ylist_empty(&tn->siblings))
YBUG();
-#ifdef __KERNEL__
if (tn->myInode) {
/* We're still hooked up to a cached inode.
* Don't delete now, but mark for later deletion
tn->deferedFree = 1;
return;
}
-#endif
yaffs_UnhashObject(tn);
-#ifdef VALGRIND_TEST
+#ifdef CONFIG_YAFFS_VALGRIND_TEST
YFREE(tn);
+ dev->nObjectsCreated--;
tn = NULL;
#else
/* Link into the free list. */
dev->nCheckpointBlocksRequired = 0; /* force recalculation*/
}
-#ifdef __KERNEL__
void yaffs_HandleDeferedFree(yaffs_Object *obj)
{
yaffs_FreeObject(obj);
}
-#endif
static void yaffs_DeinitialiseObjects(yaffs_Device *dev)
{
dev->freeObjects = NULL;
dev->nFreeObjects = 0;
+ dev->nObjectsCreated = 0;
}
static void yaffs_InitialiseObjects(yaffs_Device *dev)
static int yaffs_FindNiceObjectBucket(yaffs_Device *dev)
{
- static int x;
int i;
int l = 999;
int lowest = 999999;
- /* First let's see if we can find one that's empty. */
-
- for (i = 0; i < 10 && lowest > 0; i++) {
- x++;
- x %= YAFFS_NOBJECT_BUCKETS;
- if (dev->objectBucket[x].count < lowest) {
- lowest = dev->objectBucket[x].count;
- l = x;
- }
-
- }
- /* If we didn't find an empty list, then try
- * looking a bit further for a short one
+ /* Search for the shortest list or one that
+ * isn't too long.
*/
- for (i = 0; i < 10 && lowest > 3; i++) {
- x++;
- x %= YAFFS_NOBJECT_BUCKETS;
- if (dev->objectBucket[x].count < lowest) {
- lowest = dev->objectBucket[x].count;
- l = x;
+ for (i = 0; i < 10 && lowest > 4; i++) {
+ dev->bucketFinder++;
+ dev->bucketFinder %= YAFFS_NOBJECT_BUCKETS;
+ if (dev->objectBucket[dev->bucketFinder].count < lowest) {
+ lowest = dev->objectBucket[dev->bucketFinder].count;
+ l = dev->bucketFinder;
}
}
if (i) {
in = ylist_entry(i, yaffs_Object, hashLink);
if (in->objectId == number) {
-#ifdef __KERNEL__
+
/* Don't tell the VFS about this one if it is defered free */
if (in->deferedFree)
return NULL;
-#endif
return in;
}
yaffs_Object *yaffs_CreateNewObject(yaffs_Device *dev, int number,
yaffs_ObjectType type)
{
- yaffs_Object *theObject;
+ yaffs_Object *theObject=NULL;
yaffs_Tnode *tn = NULL;
if (number < 0)
+
if (in) {
in->hdrChunk = 0;
in->valid = 1;
}
/* TODO: Do we need this different handling for YAFFS2 and YAFFS1?? */
- if (obj->myDev->isYaffs2)
+ if (obj->myDev->param.isYaffs2)
unlinkOp = (newDir == obj->myDev->unlinkedDir);
else
unlinkOp = (newDir == obj->myDev->unlinkedDir
if (dev->blockInfo) {
/* Set up dynamic blockinfo stuff. */
- dev->chunkBitmapStride = (dev->nChunksPerBlock + 7) / 8; /* round up bytes */
+ dev->chunkBitmapStride = (dev->param.nChunksPerBlock + 7) / 8; /* round up bytes */
dev->chunkBits = YMALLOC(dev->chunkBitmapStride * nBlocks);
if (!dev->chunkBits) {
dev->chunkBits = YMALLOC_ALT(dev->chunkBitmapStride * nBlocks);
static int yaffs_BlockNotDisqualifiedFromGC(yaffs_Device *dev,
yaffs_BlockInfo *bi)
{
- int i;
- __u32 seq;
- yaffs_BlockInfo *b;
- if (!dev->isYaffs2)
+ if (!dev->param.isYaffs2)
return 1; /* disqualification only applies to yaffs2. */
if (!bi->hasShrinkHeader)
return 1; /* can gc */
- /* Find the oldest dirty sequence number if we don't know it and save it
- * so we don't have to keep recomputing it.
- */
- if (!dev->oldestDirtySequence) {
- seq = dev->sequenceNumber;
-
- for (i = dev->internalStartBlock; i <= dev->internalEndBlock;
- i++) {
- b = yaffs_GetBlockInfo(dev, i);
- if (b->blockState == YAFFS_BLOCK_STATE_FULL &&
- (b->pagesInUse - b->softDeletions) <
- dev->nChunksPerBlock && b->sequenceNumber < seq) {
- seq = b->sequenceNumber;
- }
- }
- dev->oldestDirtySequence = seq;
- }
+ yaffs_FindOldestDirtySequence(dev);
/* Can't do gc of this block if there are any blocks older than this one that have
* discarded pages.
return (bi->sequenceNumber <= dev->oldestDirtySequence);
}
-/* FindDiretiestBlock is used to select the dirtiest block (or close enough)
+/*
+ * yaffs_FindRefreshBlock()
+ * periodically finds the oldest full block by sequence number for refreshing.
+ * Only for yaffs2.
+ */
+/* Returns the block number to refresh, or 0 when no refresh is due. */
+static __u32 yaffs_FindRefreshBlock(yaffs_Device *dev)
+{
+	__u32 b ;
+
+	__u32 oldest = 0;
+	__u32 oldestSequence = 0;
+
+	yaffs_BlockInfo *bi;
+
+	/*
+	 * If refresh period < 10 then refreshing is disabled.
+	 * Refreshing only applies to yaffs2.
+	 */
+	if(dev->param.refreshPeriod < 10 ||
+		!dev->param.isYaffs2)
+		return oldest;
+
+	/*
+	 * Fix broken values.
+	 */
+	if(dev->refreshSkip > dev->param.refreshPeriod)
+		dev->refreshSkip = dev->param.refreshPeriod;
+
+	/* Count down the skip; a refresh only happens once per period. */
+	if(dev->refreshSkip > 0){
+		dev->refreshSkip--;
+		return oldest;
+	}
+
+	/*
+	 * Refresh skip is now zero.
+	 * We'll do a refresh this time around....
+	 * Update the refresh skip and find the oldest block.
+	 */
+	dev->refreshSkip = dev->param.refreshPeriod;
+	dev->refreshCount++;
+
+	/* Select the FULL block with the lowest sequence number. */
+	for (b = dev->internalStartBlock; b <=dev->internalEndBlock; b++){
+
+		bi = yaffs_GetBlockInfo(dev, b);
+
+
+		if (bi->blockState == YAFFS_BLOCK_STATE_FULL){
+
+			if(oldest < 1 ||
+				bi->sequenceNumber < oldestSequence){
+				oldest = b;
+				oldestSequence = bi->sequenceNumber;
+			}
+		}
+	}
+
+	if (oldest > 0) {
+		T(YAFFS_TRACE_GC,
+		  (TSTR("GC refresh count %d selected block %d with sequenceNumber %d" TENDSTR),
+		   dev->refreshCount, oldest, oldestSequence));
+	}
+
+	return oldest;
+}
+
+/*
+ * FindDirtiestBlock is used to select the dirtiest block (or close enough)
* for garbage collection.
*/
if (!prioritised)
pagesInUse =
- (aggressive) ? dev->nChunksPerBlock : YAFFS_PASSIVE_GC_CHUNKS + 1;
+ (aggressive) ? dev->param.nChunksPerBlock : YAFFS_PASSIVE_GC_CHUNKS + 1;
if (aggressive)
iterations =
if (dirtiest > 0) {
T(YAFFS_TRACE_GC,
(TSTR("GC Selected block %d with %d free, prioritised:%d" TENDSTR), dirtiest,
- dev->nChunksPerBlock - pagesInUse, prioritised));
+ dev->param.nChunksPerBlock - pagesInUse, prioritised));
}
- dev->oldestDirtySequence = 0;
-
if (dirtiest > 0)
dev->nonAggressiveSkip = 4;
(TSTR("yaffs_BlockBecameDirty block %d state %d %s"TENDSTR),
blockNo, bi->blockState, (bi->needsRetiring) ? "needs retiring" : ""));
+ yaffs_ClearOldestDirtySequence(dev,bi);
+
bi->blockState = YAFFS_BLOCK_STATE_DIRTY;
if (!bi->needsRetiring) {
if (erasedOk &&
((yaffs_traceMask & YAFFS_TRACE_ERASE) || !yaffs_SkipVerification(dev))) {
int i;
- for (i = 0; i < dev->nChunksPerBlock; i++) {
+ for (i = 0; i < dev->param.nChunksPerBlock; i++) {
if (!yaffs_CheckChunkErased
- (dev, blockNo * dev->nChunksPerBlock + i)) {
+ (dev, blockNo * dev->param.nChunksPerBlock + i)) {
T(YAFFS_TRACE_ERROR,
(TSTR
(">>Block %d erasure supposedly OK, but chunk %d not erased"
if (erasedOk) {
/* Clean it up... */
bi->blockState = YAFFS_BLOCK_STATE_EMPTY;
+ bi->sequenceNumber = 0;
dev->nErasedBlocks++;
bi->pagesInUse = 0;
bi->softDeletions = 0;
T(YAFFS_TRACE_ERASE,
(TSTR("Erased block %d" TENDSTR), blockNo));
} else {
- dev->nFreeChunks -= dev->nChunksPerBlock; /* We lost a block of free space */
+ dev->nFreeChunks -= dev->param.nChunksPerBlock; /* We lost a block of free space */
yaffs_RetireBlock(dev, blockNo);
T(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS,
static int yaffs_CalcCheckpointBlocksRequired(yaffs_Device *dev)
{
if (!dev->nCheckpointBlocksRequired &&
- dev->isYaffs2) {
+ dev->param.isYaffs2) {
/* Not a valid value so recalculate */
int nBytes = 0;
int nBlocks;
- int devBlocks = (dev->endBlock - dev->startBlock + 1);
- int tnodeSize;
-
- tnodeSize = (dev->tnodeWidth * YAFFS_NTNODES_LEVEL0)/8;
-
- if (tnodeSize < sizeof(yaffs_Tnode))
- tnodeSize = sizeof(yaffs_Tnode);
+ int devBlocks = (dev->param.endBlock - dev->param.startBlock + 1);
+ int tnodeSize = yaffs_CalcTnodeSize(dev);
nBytes += sizeof(yaffs_CheckpointValidity);
nBytes += sizeof(yaffs_CheckpointDevice);
/* Round up and add 2 blocks to allow for some bad blocks, so add 3 */
- nBlocks = (nBytes/(dev->nDataBytesPerChunk * dev->nChunksPerBlock)) + 3;
+ nBlocks = (nBytes/(dev->nDataBytesPerChunk * dev->param.nChunksPerBlock)) + 3;
dev->nCheckpointBlocksRequired = nBlocks;
}
static int yaffs_CheckSpaceForAllocation(yaffs_Device *dev)
{
int reservedChunks;
- int reservedBlocks = dev->nReservedBlocks;
+ int reservedBlocks = dev->param.nReservedBlocks;
int checkpointBlocks;
- if (dev->isYaffs2) {
+ if (dev->param.isYaffs2) {
checkpointBlocks = yaffs_CalcCheckpointBlocksRequired(dev) -
dev->blocksInCheckpoint;
if (checkpointBlocks < 0)
checkpointBlocks = 0;
}
- reservedChunks = ((reservedBlocks + checkpointBlocks) * dev->nChunksPerBlock);
+ reservedChunks = ((reservedBlocks + checkpointBlocks) * dev->param.nChunksPerBlock);
return (dev->nFreeChunks > reservedChunks);
}
return -1;
}
- if (dev->nErasedBlocks < dev->nReservedBlocks
+ if (dev->nErasedBlocks < dev->param.nReservedBlocks
&& dev->allocationPage == 0) {
T(YAFFS_TRACE_ALLOCATE, (TSTR("Allocating reserve" TENDSTR)));
}
if (dev->allocationBlock >= 0) {
bi = yaffs_GetBlockInfo(dev, dev->allocationBlock);
- retVal = (dev->allocationBlock * dev->nChunksPerBlock) +
+ retVal = (dev->allocationBlock * dev->param.nChunksPerBlock) +
dev->allocationPage;
bi->pagesInUse++;
yaffs_SetChunkBit(dev, dev->allocationBlock,
dev->nFreeChunks--;
/* If the block is full set the state to full */
- if (dev->allocationPage >= dev->nChunksPerBlock) {
+ if (dev->allocationPage >= dev->param.nChunksPerBlock) {
bi->blockState = YAFFS_BLOCK_STATE_FULL;
dev->allocationBlock = -1;
}
{
int n;
- n = dev->nErasedBlocks * dev->nChunksPerBlock;
+ n = dev->nErasedBlocks * dev->param.nChunksPerBlock;
if (dev->allocationBlock > 0)
- n += (dev->nChunksPerBlock - dev->allocationPage);
+ n += (dev->param.nChunksPerBlock - dev->allocationPage);
return n;
}
+/*
+ * yaffs_SkipRestOfBlock() skips over the rest of the allocation block
+ * if we don't want to write to it.
+ * Marks the current allocation block FULL and clears allocationBlock so
+ * the next write picks a fresh block. No-op if no block is being
+ * allocated from.
+ */
+static void yaffs_SkipRestOfBlock(yaffs_Device *dev)
+{
+	if(dev->allocationBlock > 0){
+		yaffs_BlockInfo *bi = yaffs_GetBlockInfo(dev, dev->allocationBlock);
+		if(bi->blockState == YAFFS_BLOCK_STATE_ALLOCATING){
+			bi->blockState = YAFFS_BLOCK_STATE_FULL;
+			dev->allocationBlock = -1;
+		}
+	}
+}
+
+
static int yaffs_GarbageCollectBlock(yaffs_Device *dev, int block,
int wholeBlock)
{
yaffs_VerifyBlock(dev, bi, block);
- maxCopies = (wholeBlock) ? dev->nChunksPerBlock : 10;
- oldChunk = block * dev->nChunksPerBlock + dev->gcChunk;
+ maxCopies = (wholeBlock) ? dev->param.nChunksPerBlock : 10;
+ oldChunk = block * dev->param.nChunksPerBlock + dev->gcChunk;
for (/* init already done */;
retVal == YAFFS_OK &&
- dev->gcChunk < dev->nChunksPerBlock &&
+ dev->gcChunk < dev->param.nChunksPerBlock &&
(bi->blockState == YAFFS_BLOCK_STATE_COLLECTING) &&
maxCopies > 0;
dev->gcChunk++, oldChunk++) {
tags.extraIsShrinkHeader = 0;
oh->shadowsObject = 0;
oh->inbandShadowsObject = 0;
+ if(object->variantType == YAFFS_OBJECT_TYPE_FILE)
+ oh->fileSize = object->variant.fileVariant.fileSize;
tags.extraShadows = 0;
yaffs_VerifyObjectHeader(object, oh, &tags, 1);
- }
-
- newChunk =
- yaffs_WriteNewChunkWithTagsToNAND(dev, buffer, &tags, 1);
+ newChunk =
+ yaffs_WriteNewChunkWithTagsToNAND(dev,(__u8 *) oh, &tags, 1);
+ } else
+ newChunk =
+ yaffs_WriteNewChunkWithTagsToNAND(dev, buffer, &tags, 1);
if (newChunk < 0) {
retVal = YAFFS_FAIL;
object->serial = tags.serialNumber;
} else {
/* It's a data chunk */
- yaffs_PutChunkIntoFile
+ int ok;
+ ok = yaffs_PutChunkIntoFile
(object,
tags.chunkId,
newChunk, 0);
yaffs_VerifyCollectedBlock(dev, bi, block);
- chunksAfter = yaffs_GetErasedChunks(dev);
- if (chunksBefore >= chunksAfter) {
- T(YAFFS_TRACE_GC,
- (TSTR
- ("gc did not increase free chunks before %d after %d"
- TENDSTR), chunksBefore, chunksAfter));
- }
+
/* If the gc completed then clear the current gcBlock so that we find another. */
if (bi->blockState != YAFFS_BLOCK_STATE_COLLECTING) {
+ chunksAfter = yaffs_GetErasedChunks(dev);
+ if (chunksBefore >= chunksAfter) {
+ T(YAFFS_TRACE_GC,
+ (TSTR
+ ("gc did not increase free chunks before %d after %d"
+ TENDSTR), chunksBefore, chunksAfter));
+ }
dev->gcBlock = -1;
dev->gcChunk = 0;
}
if (checkpointBlockAdjust < 0)
checkpointBlockAdjust = 0;
- if (dev->nErasedBlocks < (dev->nReservedBlocks + checkpointBlockAdjust + 2)) {
- /* We need a block soon...*/
+ /* If we need a block soon then do aggressive gc.*/
+ if (dev->nErasedBlocks < (dev->param.nReservedBlocks + checkpointBlockAdjust + 2))
aggressive = 1;
- } else {
- /* We're in no hurry */
+ else
aggressive = 0;
- }
- if (dev->gcBlock <= 0) {
+ /* If we don't already have a block being gc'd then see if we should start another */
+
+ if (dev->gcBlock < 1 && !aggressive) {
+ dev->gcBlock = yaffs_FindRefreshBlock(dev);
+ dev->gcChunk = 0;
+ }
+ if (dev->gcBlock < 1) {
dev->gcBlock = yaffs_FindBlockForGarbageCollection(dev, aggressive);
dev->gcChunk = 0;
}
gcOk = yaffs_GarbageCollectBlock(dev, block, aggressive);
}
- if (dev->nErasedBlocks < (dev->nReservedBlocks) && block > 0) {
+ if (dev->nErasedBlocks < (dev->param.nReservedBlocks) && block > 0) {
T(YAFFS_TRACE_GC,
(TSTR
("yaffs: GC !!!no reclaim!!! erasedBlocks %d after try %d block %d"
TENDSTR), dev->nErasedBlocks, maxTries, block));
}
- } while ((dev->nErasedBlocks < dev->nReservedBlocks) &&
+ } while ((dev->nErasedBlocks < dev->param.nReservedBlocks) &&
(block > 0) &&
(maxTries < 2));
theChunk = yaffs_GetChunkGroupBase(dev, tn, chunk);
if (yaffs_CheckChunkBits
- (dev, theChunk / dev->nChunksPerBlock,
- theChunk % dev->nChunksPerBlock)) {
+ (dev, theChunk / dev->param.nChunksPerBlock,
+ theChunk % dev->param.nChunksPerBlock)) {
yaffs_ReadChunkTagsFromNAND(in->myDev, theChunk,
tags,
if(!chunkInNAND)
/* Dummy insert, bail now */
return YAFFS_OK;
-
existingChunk = yaffs_GetChunkGroupBase(dev, tn, chunkInInode);
}
if ((inScan > 0) &&
- (in->myDev->isYaffs2 ||
+ (in->myDev->param.isYaffs2 ||
existingChunk <= 0 ||
((existingSerial + 1) & 3) == newSerial)) {
/* Forward scanning.
return;
dev->nDeletions++;
- block = chunkId / dev->nChunksPerBlock;
- page = chunkId % dev->nChunksPerBlock;
+ block = chunkId / dev->param.nChunksPerBlock;
+ page = chunkId % dev->param.nChunksPerBlock;
if (!yaffs_CheckChunkBit(dev, block, page))
chunkId));
bi = yaffs_GetBlockInfo(dev, block);
+
+ yaffs_UpdateOldestDirtySequence(dev,bi);
T(YAFFS_TRACE_DELETION,
(TSTR("line %d delete of chunk %d" TENDSTR), lyn, chunkId));
if (markNAND &&
- bi->blockState != YAFFS_BLOCK_STATE_COLLECTING && !dev->isYaffs2) {
+ bi->blockState != YAFFS_BLOCK_STATE_COLLECTING && !dev->param.isYaffs2) {
yaffs_InitialiseTags(&tags);
yaffs_CheckGarbageCollection(dev);
- /* Get the previous chunk at this location in the file if it exists */
+ /* Get the previous chunk at this location in the file if it exists.
+ * If it does not exist then put a zero into the tree. This creates
+ * the tnode now, rather than later when it is harder to clean up.
+ */
prevChunkId = yaffs_FindChunkInFile(in, chunkInInode, &prevTags);
+ if(prevChunkId < 1 &&
+ !yaffs_PutChunkIntoFile(in, chunkInInode, 0, 0))
+ return 0;
/* Set up new tags */
yaffs_InitialiseTags(&newTags);
(prevChunkId > 0) ? prevTags.serialNumber + 1 : 1;
newTags.byteCount = nBytes;
- if (nBytes < 1 || nBytes > dev->totalBytesPerChunk) {
+ if (nBytes < 1 || nBytes > dev->param.totalBytesPerChunk) {
T(YAFFS_TRACE_ERROR,
(TSTR("Writing %d bytes to chunk!!!!!!!!!" TENDSTR), nBytes));
YBUG();
}
- /*
- * If there isn't already a chunk there then do a dummy
- * insert to make sue we have the desired tnode structure.
- */
- if(prevChunkId < 1 &&
- yaffs_PutChunkIntoFile(in, chunkInInode, 0, 0) != YAFFS_OK)
- return -1;
newChunkId =
yaffs_WriteNewChunkWithTagsToNAND(dev, buffer, &newTags,
/* If this was a shrink, then mark the block that the chunk lives on */
if (isShrink) {
bi = yaffs_GetBlockInfo(in->myDev,
- newChunkId / in->myDev->nChunksPerBlock);
+ newChunkId / in->myDev->param.nChunksPerBlock);
bi->hasShrinkHeader = 1;
}
yaffs_Device *dev = obj->myDev;
int i;
yaffs_ChunkCache *cache;
- int nCaches = obj->myDev->nShortOpCaches;
+ int nCaches = obj->myDev->param.nShortOpCaches;
for (i = 0; i < nCaches; i++) {
cache = &dev->srCache[i];
int i;
yaffs_ChunkCache *cache;
int chunkWritten = 0;
- int nCaches = obj->myDev->nShortOpCaches;
+ int nCaches = obj->myDev->param.nShortOpCaches;
if (nCaches > 0) {
do {
void yaffs_FlushEntireDeviceCache(yaffs_Device *dev)
{
yaffs_Object *obj;
- int nCaches = dev->nShortOpCaches;
+ int nCaches = dev->param.nShortOpCaches;
int i;
/* Find a dirty object in the cache and flush it...
{
int i;
- if (dev->nShortOpCaches > 0) {
- for (i = 0; i < dev->nShortOpCaches; i++) {
+ if (dev->param.nShortOpCaches > 0) {
+ for (i = 0; i < dev->param.nShortOpCaches; i++) {
if (!dev->srCache[i].object)
return &dev->srCache[i];
}
int i;
int pushout;
- if (dev->nShortOpCaches > 0) {
+ if (dev->param.nShortOpCaches > 0) {
/* Try find a non-dirty one... */
cache = yaffs_GrabChunkCacheWorker(dev);
cache = NULL;
pushout = -1;
- for (i = 0; i < dev->nShortOpCaches; i++) {
+ for (i = 0; i < dev->param.nShortOpCaches; i++) {
if (dev->srCache[i].object &&
!dev->srCache[i].locked &&
(dev->srCache[i].lastUse < usage || !cache)) {
{
yaffs_Device *dev = obj->myDev;
int i;
- if (dev->nShortOpCaches > 0) {
- for (i = 0; i < dev->nShortOpCaches; i++) {
+ if (dev->param.nShortOpCaches > 0) {
+ for (i = 0; i < dev->param.nShortOpCaches; i++) {
if (dev->srCache[i].object == obj &&
dev->srCache[i].chunkId == chunkId) {
dev->cacheHits++;
int isAWrite)
{
- if (dev->nShortOpCaches > 0) {
+ if (dev->param.nShortOpCaches > 0) {
if (dev->srLastUse < 0 || dev->srLastUse > 100000000) {
/* Reset the cache usages */
int i;
- for (i = 1; i < dev->nShortOpCaches; i++)
+ for (i = 1; i < dev->param.nShortOpCaches; i++)
dev->srCache[i].lastUse = 0;
dev->srLastUse = 0;
*/
static void yaffs_InvalidateChunkCache(yaffs_Object *object, int chunkId)
{
- if (object->myDev->nShortOpCaches > 0) {
+ if (object->myDev->param.nShortOpCaches > 0) {
yaffs_ChunkCache *cache = yaffs_FindChunkCache(object, chunkId);
if (cache)
int i;
yaffs_Device *dev = in->myDev;
- if (dev->nShortOpCaches > 0) {
+ if (dev->param.nShortOpCaches > 0) {
/* Invalidate it. */
- for (i = 0; i < dev->nShortOpCaches; i++) {
+ for (i = 0; i < dev->param.nShortOpCaches; i++) {
if (dev->srCache[i].object == in)
dev->srCache[i].object = NULL;
}
cp->nUnlinkedFiles = dev->nUnlinkedFiles;
cp->nBackgroundDeletions = dev->nBackgroundDeletions;
cp->sequenceNumber = dev->sequenceNumber;
- cp->oldestDirtySequence = dev->oldestDirtySequence;
}
dev->nUnlinkedFiles = cp->nUnlinkedFiles;
dev->nBackgroundDeletions = cp->nBackgroundDeletions;
dev->sequenceNumber = cp->sequenceNumber;
- dev->oldestDirtySequence = cp->oldestDirtySequence;
}
int i;
yaffs_Device *dev = in->myDev;
int ok = 1;
- int tnodeSize = (dev->tnodeWidth * YAFFS_NTNODES_LEVEL0)/8;
-
- if (tnodeSize < sizeof(yaffs_Tnode))
- tnodeSize = sizeof(yaffs_Tnode);
-
+ int tnodeSize = yaffs_CalcTnodeSize(dev);
if (tn) {
if (level > 0) {
yaffs_FileStructure *fileStructPtr = &obj->variant.fileVariant;
yaffs_Tnode *tn;
int nread = 0;
- int tnodeSize = (dev->tnodeWidth * YAFFS_NTNODES_LEVEL0)/8;
-
- if (tnodeSize < sizeof(yaffs_Tnode))
- tnodeSize = sizeof(yaffs_Tnode);
+ int tnodeSize = yaffs_CalcTnodeSize(dev);
ok = (yaffs_CheckpointRead(dev, &baseChunk, sizeof(baseChunk)) == sizeof(baseChunk));
cp.structType = sizeof(cp);
T(YAFFS_TRACE_CHECKPOINT, (
- TSTR("Checkpoint write object %d parent %d type %d chunk %d obj addr %x" TENDSTR),
- cp.objectId, cp.parentId, cp.variantType, cp.hdrChunk, (unsigned) obj));
+ TSTR("Checkpoint write object %d parent %d type %d chunk %d obj addr %p" TENDSTR),
+ cp.objectId, cp.parentId, cp.variantType, cp.hdrChunk, obj));
ok = (yaffs_CheckpointWrite(dev, &cp, sizeof(cp)) == sizeof(cp));
ok = (yaffs_CheckpointRead(dev, &cp, sizeof(cp)) == sizeof(cp));
if (cp.structType != sizeof(cp)) {
T(YAFFS_TRACE_CHECKPOINT, (TSTR("struct size %d instead of %d ok %d"TENDSTR),
- cp.structType, sizeof(cp), ok));
+ cp.structType, (int)sizeof(cp), ok));
ok = 0;
}
{
int ok = 1;
- if (dev->skipCheckpointWrite || !dev->isYaffs2) {
+ if (dev->param.skipCheckpointWrite || !dev->param.isYaffs2) {
T(YAFFS_TRACE_CHECKPOINT, (TSTR("skipping checkpoint write" TENDSTR)));
ok = 0;
}
{
int ok = 1;
- if (dev->skipCheckpointRead || !dev->isYaffs2) {
+ if (dev->param.skipCheckpointRead || !dev->param.isYaffs2) {
T(YAFFS_TRACE_CHECKPOINT, (TSTR("skipping checkpoint read" TENDSTR)));
ok = 0;
}
dev->blocksInCheckpoint > 0) {
dev->isCheckpointed = 0;
yaffs_CheckpointInvalidateStream(dev);
- if (dev->superBlock && dev->markSuperBlockDirty)
- dev->markSuperBlockDirty(dev->superBlock);
+ if (dev->param.markSuperBlockDirty)
+ dev->param.markSuperBlockDirty(dev);
}
}
* or we're using inband tags then use the cache (if there is caching)
* else bypass the cache.
*/
- if (cache || nToCopy != dev->nDataBytesPerChunk || dev->inbandTags) {
- if (dev->nShortOpCaches > 0) {
+ if (cache || nToCopy != dev->nDataBytesPerChunk || dev->param.inbandTags) {
+ if (dev->param.nShortOpCaches > 0) {
/* If we can't find the data in the cache, then load it up. */
nToWriteBack = dev->nDataBytesPerChunk;
}
- if (nToCopy != dev->nDataBytesPerChunk || dev->inbandTags) {
+ if (nToCopy != dev->nDataBytesPerChunk || dev->param.inbandTags) {
/* An incomplete start or end chunk (or maybe both start and end chunk),
* or we're using inband tags, so we want to use the cache buffers.
*/
- if (dev->nShortOpCaches > 0) {
+ if (dev->param.nShortOpCaches > 0) {
yaffs_ChunkCache *cache;
/* If we can't find the data in the cache, then load the cache */
cache = yaffs_FindChunkCache(in, chunk);
chunkId = yaffs_FindAndDeleteChunkInFile(in, i, NULL);
if (chunkId > 0) {
if (chunkId <
- (dev->internalStartBlock * dev->nChunksPerBlock)
+ (dev->internalStartBlock * dev->param.nChunksPerBlock)
|| chunkId >=
((dev->internalEndBlock +
- 1) * dev->nChunksPerBlock)) {
+ 1) * dev->param.nChunksPerBlock)) {
T(YAFFS_TRACE_ALWAYS,
(TSTR("Found daft chunkId %d for %d" TENDSTR),
chunkId, i));
in->variant.fileVariant.fileSize = newSize;
}
-
/* Write a new object header to reflect the resize.
* show we've shrunk the file, if need be
* Do this only if the file is not in the deleted directories
/* First off, invalidate the file's data in the cache, without flushing. */
yaffs_InvalidateWholeChunkCache(in);
- if (in->myDev->isYaffs2 && (in->parent != in->myDev->deletedDir)) {
+ if (in->myDev->param.isYaffs2 && (in->parent != in->myDev->deletedDir)) {
/* Move to the unlinked directory so we have a record that it was deleted. */
yaffs_ChangeObjectName(in, in->myDev->deletedDir, _Y("deleted"), 0, 0);
int retVal;
int immediateDeletion = 0;
-#ifdef __KERNEL__
if (!in->myInode)
immediateDeletion = 1;
-#else
- if (in->inUse <= 0)
- immediateDeletion = 1;
-#endif
if (immediateDeletion) {
retVal =
in->objectId));
in->deleted = 1;
in->myDev->nDeletedFiles++;
- if (1 || in->myDev->isYaffs2)
+ if (1 || in->myDev->param.isYaffs2)
yaffs_ResizeFile(in, 0);
yaffs_SoftDeleteFile(in);
} else {
int immediateDeletion = 0;
-#ifdef __KERNEL__
if (!obj->myInode)
immediateDeletion = 1;
-#else
- if (obj->inUse <= 0)
- immediateDeletion = 1;
-#endif
if(obj)
yaffs_UpdateParent(obj->parent);
* Instead, we do the following:
* - Select a hardlink.
* - Unhook it from the hard links
- * - Unhook it from its parent directory (so that the rename can work)
+ * - Move it from its parent directory (so that the rename can work)
* - Rename the object to the hardlink's name.
* - Delete the hardlink
*/
yaffs_Object *hl;
+ yaffs_Object *parent;
int retVal;
YCHAR name[YAFFS_MAX_NAME_LENGTH + 1];
hl = ylist_entry(obj->hardLinks.next, yaffs_Object, hardLinks);
+ yaffs_GetObjectName(hl, name, YAFFS_MAX_NAME_LENGTH + 1);
+ parent = hl->parent;
+
ylist_del_init(&hl->hardLinks);
- ylist_del_init(&hl->siblings);
- yaffs_GetObjectName(hl, name, YAFFS_MAX_NAME_LENGTH + 1);
+ yaffs_AddObjectToDirectory(obj->myDev->unlinkedDir, hl);
- retVal = yaffs_ChangeObjectName(obj, hl->parent, name, 0, 0);
+ retVal = yaffs_ChangeObjectName(obj,parent, name, 0, 0);
if (retVal == YAFFS_OK)
retVal = yaffs_DoGenericObjectDeletion(hl);
T(YAFFS_TRACE_SCAN_DEBUG,
(TSTR("Block empty " TENDSTR)));
dev->nErasedBlocks++;
- dev->nFreeChunks += dev->nChunksPerBlock;
+ dev->nFreeChunks += dev->param.nChunksPerBlock;
}
}
deleted = 0;
/* For each chunk in each block that needs scanning....*/
- for (c = 0; !alloc_failed && c < dev->nChunksPerBlock &&
+ for (c = 0; !alloc_failed && c < dev->param.nChunksPerBlock &&
state == YAFFS_BLOCK_STATE_NEEDS_SCANNING; c++) {
/* Read the tags and decide what to do */
- chunk = blk * dev->nChunksPerBlock + c;
+ chunk = blk * dev->param.nChunksPerBlock + c;
result = yaffs_ReadChunkWithTagsFromNAND(dev, chunk, NULL,
&tags);
dev->allocationBlock = blk;
dev->allocationPage = c;
dev->allocationBlockFinder = blk;
- /* Set it to here to encourage the allocator to go forth from here. */
+ /* Set block finder here to encourage the allocator to go forth from here. */
}
- dev->nFreeChunks += (dev->nChunksPerBlock - c);
+ dev->nFreeChunks += (dev->param.nChunksPerBlock - c);
} else if (tags.chunkId > 0) {
/* chunkId > 0 so it is a data chunk... */
unsigned int endpos;
endpos) {
in->variant.fileVariant.
scannedFileSize = endpos;
- if (!dev->useHeaderFileSize) {
+ if (!dev->param.useHeaderFileSize) {
in->variant.fileVariant.
fileSize =
in->variant.fileVariant.
/* Todo got a problem */
break;
case YAFFS_OBJECT_TYPE_FILE:
- if (dev->useHeaderFileSize)
+ if (dev->param.useHeaderFileSize)
in->variant.fileVariant.
fileSize =
break;
}
-/*
- if (parent == dev->deletedDir) {
- yaffs_DestroyObject(in);
- bi->hasShrinkHeader = 1;
- }
-*/
}
}
}
state = YAFFS_BLOCK_STATE_FULL;
}
+ if (state == YAFFS_BLOCK_STATE_ALLOCATING) {
+ /* If the block was partially allocated then treat it as fully allocated.*/
+ state = YAFFS_BLOCK_STATE_FULL;
+ dev->allocationBlock = -1;
+ }
+
bi->blockState = state;
/* Now let's see if it was dirty */
yaffs_BlockIndex *blockIndex = NULL;
int altBlockIndex = 0;
- if (!dev->isYaffs2) {
+ if (!dev->param.isYaffs2) {
T(YAFFS_TRACE_SCAN,
(TSTR("yaffs_ScanBackwards is only for YAFFS2!" TENDSTR)));
return YAFFS_FAIL;
T(YAFFS_TRACE_SCAN_DEBUG,
(TSTR("Block empty " TENDSTR)));
dev->nErasedBlocks++;
- dev->nFreeChunks += dev->nChunksPerBlock;
+ dev->nFreeChunks += dev->param.nChunksPerBlock;
} else if (state == YAFFS_BLOCK_STATE_NEEDS_SCANNING) {
/* Determine the highest sequence number */
/* For each chunk in each block that needs scanning.... */
foundChunksInBlock = 0;
- for (c = dev->nChunksPerBlock - 1;
+ for (c = dev->param.nChunksPerBlock - 1;
!alloc_failed && c >= 0 &&
(state == YAFFS_BLOCK_STATE_NEEDS_SCANNING ||
state == YAFFS_BLOCK_STATE_ALLOCATING); c--) {
* Read the tags and decide what to do
*/
- chunk = blk * dev->nChunksPerBlock + c;
+ chunk = blk * dev->param.nChunksPerBlock + c;
result = yaffs_ReadChunkWithTagsFromNAND(dev, chunk, NULL,
&tags);
dev->allocationBlockFinder = blk;
} else {
/* This is a partially written block that is not
- * the current allocation block. This block must have
- * had a write failure, so set up for retirement.
+ * the current allocation block.
*/
- /* bi->needsRetiring = 1; ??? TODO */
- bi->gcPrioritise = 1;
-
T(YAFFS_TRACE_ALWAYS,
(TSTR("Partially written block %d detected" TENDSTR),
blk));
if (in &&
in->variantType == YAFFS_OBJECT_TYPE_FILE
- && chunkBase <
- in->variant.fileVariant.shrinkSize) {
+ && chunkBase < in->variant.fileVariant.shrinkSize) {
/* This has not been invalidated by a resize */
- if (!yaffs_PutChunkIntoFile(in, tags.chunkId,
- chunk, -1)) {
+ if (!yaffs_PutChunkIntoFile(in, tags.chunkId, chunk, -1)) {
alloc_failed = 1;
}
/* File size is calculated by looking at the data chunks if we have not
* seen an object header yet. Stop this practice once we find an object header.
*/
- endpos =
- (tags.chunkId -
- 1) * dev->nDataBytesPerChunk +
- tags.byteCount;
+ endpos = chunkBase + tags.byteCount;
if (!in->valid && /* have not got an object header yet */
- in->variant.fileVariant.
- scannedFileSize < endpos) {
- in->variant.fileVariant.
- scannedFileSize = endpos;
- in->variant.fileVariant.
- fileSize =
- in->variant.fileVariant.
- scannedFileSize;
+ in->variant.fileVariant.scannedFileSize < endpos) {
+ in->variant.fileVariant.scannedFileSize = endpos;
+ in->variant.fileVariant.fileSize = endpos;
}
} else if (in) {
- /* This chunk has been invalidated by a resize, so delete */
- /* This chunk has been invalidated by a resize, so delete */
+ /* This chunk has been invalidated by a resize, or a past file deletion,
+ * so delete the chunk */
yaffs_DeleteChunk(dev, chunk, 1, __LINE__);
}
in = NULL;
if (tags.extraHeaderInfoAvailable) {
- in = yaffs_FindOrCreateObjectByNumber
- (dev, tags.objectId,
- tags.extraObjectType);
+ in = yaffs_FindOrCreateObjectByNumber(dev,
+ tags.objectId,
+ tags.extraObjectType);
if (!in)
alloc_failed = 1;
}
if (!in ||
- (!in->valid && dev->disableLazyLoad) ||
+ (!in->valid && dev->param.disableLazyLoad) ||
tags.extraShadows ||
(!in->valid &&
(tags.objectId == YAFFS_OBJECTID_ROOT ||
oh = (yaffs_ObjectHeader *) chunkData;
- if (dev->inbandTags) {
+ if (dev->param.inbandTags) {
/* Fix up the header if they got corrupted by inband tags */
oh->shadowsObject = oh->inbandShadowsObject;
oh->isShrink = oh->inbandIsShrink;
isShrink = 1;
}
- if (isShrink &&
- in->variant.fileVariant.
- shrinkSize > thisSize) {
- in->variant.fileVariant.
- shrinkSize =
- thisSize;
- }
+ if (isShrink && in->variant.fileVariant.shrinkSize > thisSize)
+ in->variant.fileVariant.shrinkSize = thisSize;
if (isShrink)
bi->hasShrinkHeader = 1;
oh->
shadowsObject,
1);
+
yaffs_SetObjectName(in, oh->name);
* than its current data extents.
*/
in->variant.fileVariant.fileSize = fileSize;
- in->variant.fileVariant.scannedFileSize =
- in->variant.fileVariant.fileSize;
+ in->variant.fileVariant.scannedFileSize = fileSize;
}
- if (isShrink &&
- in->variant.fileVariant.shrinkSize > fileSize) {
+ if (in->variant.fileVariant.shrinkSize > fileSize)
in->variant.fileVariant.shrinkSize = fileSize;
- }
+
break;
case YAFFS_OBJECT_TYPE_HARDLINK:
state = YAFFS_BLOCK_STATE_FULL;
}
+
bi->blockState = state;
/* Now let's see if it was dirty */
}
}
+
+ yaffs_SkipRestOfBlock(dev);
if (altBlockIndex)
YFREE_ALT(blockIndex);
yaffs_VerifyDirectory(parent);
- if (dev && dev->removeObjectCallback)
- dev->removeObjectCallback(obj);
+ if (dev && dev->param.removeObjectCallback)
+ dev->param.removeObjectCallback(obj);
ylist_del_init(&obj->siblings);
{
/* Common functions, gotta have */
- if (!dev->eraseBlockInNAND || !dev->initialiseNAND)
+ if (!dev->param.eraseBlockInNAND || !dev->param.initialiseNAND)
return 0;
#ifdef CONFIG_YAFFS_YAFFS2
/* Can use the "with tags" style interface for yaffs1 or yaffs2 */
- if (dev->writeChunkWithTagsToNAND &&
- dev->readChunkWithTagsFromNAND &&
- !dev->writeChunkToNAND &&
- !dev->readChunkFromNAND &&
- dev->markNANDBlockBad && dev->queryNANDBlock)
+ if (dev->param.writeChunkWithTagsToNAND &&
+ dev->param.readChunkWithTagsFromNAND &&
+ !dev->param.writeChunkToNAND &&
+ !dev->param.readChunkFromNAND &&
+ dev->param.markNANDBlockBad &&
+ dev->param.queryNANDBlock)
return 1;
#endif
/* Can use the "spare" style interface for yaffs1 */
- if (!dev->isYaffs2 &&
- !dev->writeChunkWithTagsToNAND &&
- !dev->readChunkWithTagsFromNAND &&
- dev->writeChunkToNAND &&
- dev->readChunkFromNAND &&
- !dev->markNANDBlockBad && !dev->queryNANDBlock)
+ if (!dev->param.isYaffs2 &&
+ !dev->param.writeChunkWithTagsToNAND &&
+ !dev->param.readChunkWithTagsFromNAND &&
+ dev->param.writeChunkToNAND &&
+ dev->param.readChunkFromNAND &&
+ !dev->param.markNANDBlockBad &&
+ !dev->param.queryNANDBlock)
return 1;
- return 0; /* bad */
+ return 0; /* bad */
}
return YAFFS_FAIL;
}
- dev->internalStartBlock = dev->startBlock;
- dev->internalEndBlock = dev->endBlock;
+ dev->internalStartBlock = dev->param.startBlock;
+ dev->internalEndBlock = dev->param.endBlock;
dev->blockOffset = 0;
dev->chunkOffset = 0;
dev->nFreeChunks = 0;
dev->gcBlock = -1;
- if (dev->startBlock == 0) {
- dev->internalStartBlock = dev->startBlock + 1;
- dev->internalEndBlock = dev->endBlock + 1;
+ if (dev->param.startBlock == 0) {
+ dev->internalStartBlock = dev->param.startBlock + 1;
+ dev->internalEndBlock = dev->param.endBlock + 1;
dev->blockOffset = 1;
- dev->chunkOffset = dev->nChunksPerBlock;
+ dev->chunkOffset = dev->param.nChunksPerBlock;
}
/* Check geometry parameters. */
- if ((!dev->inbandTags && dev->isYaffs2 && dev->totalBytesPerChunk < 1024) ||
- (!dev->isYaffs2 && dev->totalBytesPerChunk < 512) ||
- (dev->inbandTags && !dev->isYaffs2) ||
- dev->nChunksPerBlock < 2 ||
- dev->nReservedBlocks < 2 ||
+ if ((!dev->param.inbandTags && dev->param.isYaffs2 && dev->param.totalBytesPerChunk < 1024) ||
+ (!dev->param.isYaffs2 && dev->param.totalBytesPerChunk < 512) ||
+ (dev->param.inbandTags && !dev->param.isYaffs2) ||
+ dev->param.nChunksPerBlock < 2 ||
+ dev->param.nReservedBlocks < 2 ||
dev->internalStartBlock <= 0 ||
dev->internalEndBlock <= 0 ||
- dev->internalEndBlock <= (dev->internalStartBlock + dev->nReservedBlocks + 2)) { /* otherwise it is too small */
+ dev->internalEndBlock <= (dev->internalStartBlock + dev->param.nReservedBlocks + 2)) { /* otherwise it is too small */
T(YAFFS_TRACE_ALWAYS,
(TSTR
("yaffs: NAND geometry problems: chunk size %d, type is yaffs%s, inbandTags %d "
- TENDSTR), dev->totalBytesPerChunk, dev->isYaffs2 ? "2" : "", dev->inbandTags));
+ TENDSTR), dev->param.totalBytesPerChunk, dev->param.isYaffs2 ? "2" : "", dev->param.inbandTags));
return YAFFS_FAIL;
}
}
/* Sort out space for inband tags, if required */
- if (dev->inbandTags)
- dev->nDataBytesPerChunk = dev->totalBytesPerChunk - sizeof(yaffs_PackedTags2TagsPart);
+ if (dev->param.inbandTags)
+ dev->nDataBytesPerChunk = dev->param.totalBytesPerChunk - sizeof(yaffs_PackedTags2TagsPart);
else
- dev->nDataBytesPerChunk = dev->totalBytesPerChunk;
+ dev->nDataBytesPerChunk = dev->param.totalBytesPerChunk;
/* Got the right mix of functions? */
if (!yaffs_CheckDevFunctions(dev)) {
* We need to find the next power of 2 > than internalEndBlock
*/
- x = dev->nChunksPerBlock * (dev->internalEndBlock + 1);
+ x = dev->param.nChunksPerBlock * (dev->internalEndBlock + 1);
bits = ShiftsGE(x);
/* Set up tnode width if wide tnodes are enabled. */
- if (!dev->wideTnodesDisabled) {
+ if (!dev->param.wideTnodesDisabled) {
/* bits must be even so that we end up with 32-bit words */
if (bits & 1)
bits++;
dev->chunkGroupSize = 1 << dev->chunkGroupBits;
- if (dev->nChunksPerBlock < dev->chunkGroupSize) {
+ if (dev->param.nChunksPerBlock < dev->chunkGroupSize) {
/* We have a problem because the soft delete won't work if
* the chunk group size > chunks per block.
* This can be remedied by using larger "virtual blocks".
dev->nErasedBlocks = 0;
dev->isDoingGC = 0;
dev->hasPendingPrioritisedGCs = 1; /* Assume the worst for now, will get fixed on first GC */
+ dev->oldestDirtySequence = 0;
/* Initialise temporary buffers and caches. */
if (!yaffs_InitialiseTempBuffers(dev))
if (!init_failed &&
- dev->nShortOpCaches > 0) {
+ dev->param.nShortOpCaches > 0) {
int i;
void *buf;
- int srCacheBytes = dev->nShortOpCaches * sizeof(yaffs_ChunkCache);
+ int srCacheBytes = dev->param.nShortOpCaches * sizeof(yaffs_ChunkCache);
- if (dev->nShortOpCaches > YAFFS_MAX_SHORT_OP_CACHES)
- dev->nShortOpCaches = YAFFS_MAX_SHORT_OP_CACHES;
+ if (dev->param.nShortOpCaches > YAFFS_MAX_SHORT_OP_CACHES)
+ dev->param.nShortOpCaches = YAFFS_MAX_SHORT_OP_CACHES;
dev->srCache = YMALLOC(srCacheBytes);
if (dev->srCache)
memset(dev->srCache, 0, srCacheBytes);
- for (i = 0; i < dev->nShortOpCaches && buf; i++) {
+ for (i = 0; i < dev->param.nShortOpCaches && buf; i++) {
dev->srCache[i].object = NULL;
dev->srCache[i].lastUse = 0;
dev->srCache[i].dirty = 0;
- dev->srCache[i].data = buf = YMALLOC_DMA(dev->totalBytesPerChunk);
+ dev->srCache[i].data = buf = YMALLOC_DMA(dev->param.totalBytesPerChunk);
}
if (!buf)
init_failed = 1;
dev->cacheHits = 0;
if (!init_failed) {
- dev->gcCleanupList = YMALLOC(dev->nChunksPerBlock * sizeof(__u32));
+ dev->gcCleanupList = YMALLOC(dev->param.nChunksPerBlock * sizeof(__u32));
if (!dev->gcCleanupList)
init_failed = 1;
}
- if (dev->isYaffs2)
- dev->useHeaderFileSize = 1;
+ if (dev->param.isYaffs2)
+ dev->param.useHeaderFileSize = 1;
if (!init_failed && !yaffs_InitialiseBlocks(dev))
init_failed = 1;
if (!init_failed) {
/* Now scan the flash. */
- if (dev->isYaffs2) {
+ if (dev->param.isYaffs2) {
if (yaffs_CheckpointRestore(dev)) {
yaffs_CheckObjectDetailsLoaded(dev->rootDir);
T(YAFFS_TRACE_ALWAYS,
dev->nDeletedFiles = 0;
dev->nUnlinkedFiles = 0;
dev->nBackgroundDeletions = 0;
- dev->oldestDirtySequence = 0;
if (!init_failed && !yaffs_InitialiseBlocks(dev))
init_failed = 1;
yaffs_StripDeletedObjects(dev);
yaffs_FixHangingObjects(dev);
- if(dev->emptyLostAndFound)
+ if(dev->param.emptyLostAndFound)
yaffs_EmptyLostAndFound(dev);
}
yaffs_DeinitialiseBlocks(dev);
yaffs_DeinitialiseTnodes(dev);
yaffs_DeinitialiseObjects(dev);
- if (dev->nShortOpCaches > 0 &&
+ if (dev->param.nShortOpCaches > 0 &&
dev->srCache) {
- for (i = 0; i < dev->nShortOpCaches; i++) {
+ for (i = 0; i < dev->param.nShortOpCaches; i++) {
if (dev->srCache[i].data)
YFREE(dev->srCache[i].data);
dev->srCache[i].data = NULL;
dev->isMounted = 0;
- if (dev->deinitialiseNAND)
- dev->deinitialiseNAND(dev);
+ if (dev->param.deinitialiseNAND)
+ dev->param.deinitialiseNAND(dev);
}
}
case YAFFS_BLOCK_STATE_COLLECTING:
case YAFFS_BLOCK_STATE_FULL:
nFree +=
- (dev->nChunksPerBlock - blk->pagesInUse +
+ (dev->param.nChunksPerBlock - blk->pagesInUse +
blk->softDeletions);
break;
default:
/* Now count the number of dirty chunks in the cache and subtract those */
- for (nDirtyCacheChunks = 0, i = 0; i < dev->nShortOpCaches; i++) {
+ for (nDirtyCacheChunks = 0, i = 0; i < dev->param.nShortOpCaches; i++) {
if (dev->srCache[i].dirty)
nDirtyCacheChunks++;
}
nFree -= nDirtyCacheChunks;
- nFree -= ((dev->nReservedBlocks + 1) * dev->nChunksPerBlock);
+ nFree -= ((dev->param.nReservedBlocks + 1) * dev->param.nChunksPerBlock);
/* Now we figure out how much to reserve for the checkpoint and report that... */
blocksForCheckpoint = yaffs_CalcCheckpointBlocksRequired(dev) - dev->blocksInCheckpoint;
if (blocksForCheckpoint < 0)
blocksForCheckpoint = 0;
- nFree -= (blocksForCheckpoint * dev->nChunksPerBlock);
+ nFree -= (blocksForCheckpoint * dev->param.nChunksPerBlock);
if (nFree < 0)
nFree = 0;
do { \
if (sizeof(structure) != syze) { \
T(YAFFS_TRACE_ALWAYS, (TSTR("%s should be %d but is %d\n" TENDSTR),\
- name, syze, sizeof(structure))); \
+ name, syze, (int) sizeof(structure))); \
return YAFFS_FAIL; \
} \
} while (0)
/* yaffs_CheckStruct(yaffs_TagsUnion,8,"yaffs_TagsUnion"); */
/* yaffs_CheckStruct(yaffs_Spare,16,"yaffs_Spare"); */
#ifndef CONFIG_YAFFS_TNODE_LIST_DEBUG
- yaffs_CheckStruct(yaffs_Tnode, 2 * YAFFS_NTNODES_LEVEL0, "yaffs_Tnode");
+/* yaffs_CheckStruct(yaffs_Tnode, 2 * YAFFS_NTNODES_LEVEL0, "yaffs_Tnode"); */
#endif
#ifndef CONFIG_YAFFS_WINCE
yaffs_CheckStruct(yaffs_ObjectHeader, 512, "yaffs_ObjectHeader");