From 7926b5a325f06745a1bed75bfb4ef814d0ae9d99 Mon Sep 17 00:00:00 2001
From: Ben Dooks
Date: Thu, 30 Oct 2008 10:14:35 +0000
Subject: [ARM] S3C: Move nand headers to arch/arm/plat-s3c/include/plat

Move the nand headers to arch/arm/plat-s3c/include/plat, ready to
clean out the old include directories.

Signed-off-by: Ben Dooks
---
 drivers/mtd/nand/s3c2410.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
(limited to 'drivers/mtd')

diff --git a/drivers/mtd/nand/s3c2410.c b/drivers/mtd/nand/s3c2410.c
index 556139ed1fd..098f5162388 100644
--- a/drivers/mtd/nand/s3c2410.c
+++ b/drivers/mtd/nand/s3c2410.c
@@ -45,8 +45,8 @@
 #include

-#include <asm/plat-s3c/regs-nand.h>
-#include <asm/plat-s3c/nand.h>
+#include <plat/regs-nand.h>
+#include <plat/nand.h>

 #ifdef CONFIG_MTD_NAND_S3C2410_HWECC
 static int hardware_ecc = 1;
-- cgit

From 59f0cb0fddc14ffc6676ae62e911f8115ebc8ccf Mon Sep 17 00:00:00 2001
From: Russell King
Date: Mon, 27 Oct 2008 11:24:09 +0000
Subject: [ARM] remove memzero()

As suggested by Andrew Morton, remove memzero() - it's not supported
on other architectures, so use of it is a potential build-breaking bug.
Since the compiler optimizes memset(x,0,n) to __memzero() perfectly
well, we don't miss out on the underlying benefits of memzero().

Signed-off-by: Russell King
---
 drivers/mtd/maps/ixp2000.c | 2 +-
 drivers/mtd/maps/ixp4xx.c  | 2 +-
 drivers/mtd/nand/s3c2410.c | 4 ++--
 3 files changed, 4 insertions(+), 4 deletions(-)
(limited to 'drivers/mtd')

diff --git a/drivers/mtd/maps/ixp2000.c b/drivers/mtd/maps/ixp2000.c
index dcdb1f17577..3ea1de9be72 100644
--- a/drivers/mtd/maps/ixp2000.c
+++ b/drivers/mtd/maps/ixp2000.c
@@ -170,7 +170,7 @@ static int ixp2000_flash_probe(struct platform_device *dev)
 		err = -ENOMEM;
 		goto Error;
 	}
-	memzero(info, sizeof(struct ixp2000_flash_info));
+	memset(info, 0, sizeof(struct ixp2000_flash_info));

 	platform_set_drvdata(dev, info);

diff --git a/drivers/mtd/maps/ixp4xx.c b/drivers/mtd/maps/ixp4xx.c
index 9c7a5fbd4e5..16555cbeaea 100644
--- a/drivers/mtd/maps/ixp4xx.c
+++ b/drivers/mtd/maps/ixp4xx.c
@@ -201,7 +201,7 @@ static int ixp4xx_flash_probe(struct platform_device *dev)
 		err = -ENOMEM;
 		goto Error;
 	}
-	memzero(info, sizeof(struct ixp4xx_flash_info));
+	memset(info, 0, sizeof(struct ixp4xx_flash_info));

 	platform_set_drvdata(dev, info);

diff --git a/drivers/mtd/nand/s3c2410.c b/drivers/mtd/nand/s3c2410.c
index 556139ed1fd..bc9aa64bf18 100644
--- a/drivers/mtd/nand/s3c2410.c
+++ b/drivers/mtd/nand/s3c2410.c
@@ -818,7 +818,7 @@ static int s3c24xx_nand_probe(struct platform_device *pdev,
 		goto exit_error;
 	}

-	memzero(info, sizeof(*info));
+	memset(info, 0, sizeof(*info));
 	platform_set_drvdata(pdev, info);

 	spin_lock_init(&info->controller.lock);
@@ -883,7 +883,7 @@ static int s3c24xx_nand_probe(struct platform_device *pdev,
 		goto exit_error;
 	}

-	memzero(info->mtds, size);
+	memset(info->mtds, 0, size);

 	/* initialise all possible chips */
-- cgit

From e0d8b13ae1e3ea747620580b6f777992148de182 Mon Sep 17 00:00:00 2001
From: Russell King
Date: Tue, 11 Nov 2008 17:52:32 +0000
Subject: [ARM] pxa: don't pass a consumer clock name for devices with unique clocks

Where devices only have one consumer, passing a consumer clock ID has
no real benefit. Remove it.
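A minimal sketch of the resulting probe-time pattern, assuming the
standard clk API of this era (error handling trimmed):

	struct clk *clk;

	/* the device has exactly one clock, so no consumer ID is needed */
	clk = clk_get(&pdev->dev, NULL);  /* was: clk_get(&pdev->dev, "NANDCLK") */
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	clk_enable(clk);
	/* ... use the peripheral ... */
	clk_disable(clk);
	clk_put(clk);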
Signed-off-by: Russell King
---
 drivers/mtd/nand/pxa3xx_nand.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
(limited to 'drivers/mtd')

diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index c0fa9c9edf0..61c922a8356 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -1079,7 +1079,7 @@ static int pxa3xx_nand_probe(struct platform_device *pdev)
 	this = &info->nand_chip;
 	mtd->priv = info;

-	info->clk = clk_get(&pdev->dev, "NANDCLK");
+	info->clk = clk_get(&pdev->dev, NULL);
 	if (IS_ERR(info->clk)) {
 		dev_err(&pdev->dev, "failed to get nand clock\n");
 		ret = PTR_ERR(info->clk);
-- cgit

From afb5b5c9adb66c73b83dc39319efbacb6fb26a7d Mon Sep 17 00:00:00 2001
From: Eric Miao
Date: Mon, 1 Dec 2008 11:43:08 +0800
Subject: [ARM] pxa: explicit #include <mach/dma.h> in various drivers

Where 'pxa_dma_desc' and 'pxa_{request,free}_dma' are referenced.

Signed-off-by: Eric Miao
---
 drivers/mtd/nand/pxa3xx_nand.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
(limited to 'drivers/mtd')

diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index c0fa9c9edf0..64b3be1ea0a 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -20,8 +20,8 @@
 #include
 #include
 #include
-#include <asm/dma.h>
+#include <mach/dma.h>
 #include
 #include
-- cgit

From ed45819f315b5a8844b5bfce881a18e9f3a055e7 Mon Sep 17 00:00:00 2001
From: Artem Bityutskiy
Date: Wed, 12 Nov 2008 10:14:10 +0200
Subject: UBI: fix warnings when debugging is enabled

The 'ubi_io_read_vid_hdr()' and 'ubi_io_read_ec_hdr()' functions have
the 'verbose' argument which controls whether they should print a
warning if the VID/EC header was not found or was corrupted. Some
callers require the headers to be OK and pass 1; others allow a
corrupted or missing header and pass 0. However, both functions also
contain:

	if (UBI_IO_DEBUG)
		verbose = 1;

and UBI_IO_DEBUG is 1 if CONFIG_MTD_UBI_DEBUG_MSG_BLD is set, so in
that case the warning is printed all the time, which confuses people.
Thus, do not print the messages as warnings if UBI_IO_DEBUG is true,
but print them as debugging messages instead.
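In sketch form, every check in the two functions becomes:

	if (verbose)
		ubi_warn("no EC header found at PEB %d, only 0xFF bytes", pnum);
	else if (UBI_IO_DEBUG)
		dbg_msg("no EC header found at PEB %d, only 0xFF bytes", pnum);

so callers which passed 0 still get the diagnostic when debugging is
compiled in, but as a debug message rather than a warning.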
Signed-off-by: Artem Bityutskiy --- drivers/mtd/ubi/io.c | 26 ++++++++++++++++++-------- 1 file changed, 18 insertions(+), 8 deletions(-) (limited to 'drivers/mtd') diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c index 2fb64be44f1..f60f700256f 100644 --- a/drivers/mtd/ubi/io.c +++ b/drivers/mtd/ubi/io.c @@ -637,8 +637,6 @@ int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum, dbg_io("read EC header from PEB %d", pnum); ubi_assert(pnum >= 0 && pnum < ubi->peb_count); - if (UBI_IO_DEBUG) - verbose = 1; err = ubi_io_read(ubi, ec_hdr, pnum, 0, UBI_EC_HDR_SIZE); if (err) { @@ -685,6 +683,9 @@ int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum, if (verbose) ubi_warn("no EC header found at PEB %d, " "only 0xFF bytes", pnum); + else if (UBI_IO_DEBUG) + dbg_msg("no EC header found at PEB %d, " + "only 0xFF bytes", pnum); return UBI_IO_PEB_EMPTY; } @@ -696,7 +697,9 @@ int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum, ubi_warn("bad magic number at PEB %d: %08x instead of " "%08x", pnum, magic, UBI_EC_HDR_MAGIC); ubi_dbg_dump_ec_hdr(ec_hdr); - } + } else if (UBI_IO_DEBUG) + dbg_msg("bad magic number at PEB %d: %08x instead of " + "%08x", pnum, magic, UBI_EC_HDR_MAGIC); return UBI_IO_BAD_EC_HDR; } @@ -708,7 +711,9 @@ int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum, ubi_warn("bad EC header CRC at PEB %d, calculated " "%#08x, read %#08x", pnum, crc, hdr_crc); ubi_dbg_dump_ec_hdr(ec_hdr); - } + } else if (UBI_IO_DEBUG) + dbg_msg("bad EC header CRC at PEB %d, calculated " + "%#08x, read %#08x", pnum, crc, hdr_crc); return UBI_IO_BAD_EC_HDR; } @@ -912,8 +917,6 @@ int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum, dbg_io("read VID header from PEB %d", pnum); ubi_assert(pnum >= 0 && pnum < ubi->peb_count); - if (UBI_IO_DEBUG) - verbose = 1; p = (char *)vid_hdr - ubi->vid_hdr_shift; err = ubi_io_read(ubi, p, pnum, ubi->vid_hdr_aloffset, @@ -960,6 +963,9 @@ int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum, if (verbose) ubi_warn("no VID header found at PEB %d, " "only 0xFF bytes", pnum); + else if (UBI_IO_DEBUG) + dbg_msg("no VID header found at PEB %d, " + "only 0xFF bytes", pnum); return UBI_IO_PEB_FREE; } @@ -971,7 +977,9 @@ int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum, ubi_warn("bad magic number at PEB %d: %08x instead of " "%08x", pnum, magic, UBI_VID_HDR_MAGIC); ubi_dbg_dump_vid_hdr(vid_hdr); - } + } else if (UBI_IO_DEBUG) + dbg_msg("bad magic number at PEB %d: %08x instead of " + "%08x", pnum, magic, UBI_VID_HDR_MAGIC); return UBI_IO_BAD_VID_HDR; } @@ -983,7 +991,9 @@ int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum, ubi_warn("bad CRC at PEB %d, calculated %#08x, " "read %#08x", pnum, crc, hdr_crc); ubi_dbg_dump_vid_hdr(vid_hdr); - } + } else if (UBI_IO_DEBUG) + dbg_msg("bad CRC at PEB %d, calculated %#08x, " + "read %#08x", pnum, crc, hdr_crc); return UBI_IO_BAD_VID_HDR; } -- cgit From 4df581f3dc6a91a63b9965ac8bdb47d8db294e37 Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Thu, 4 Dec 2008 20:52:44 +0200 Subject: UBI: fix deadlock We cannot call 'ubi_wl_get_peb()' with @ubi->buf_mutex locked, because 'ubi_wl_get_peb()' may force erasure, which, in turn, may call 'torture_peb()' which also locks the @ubi->buf_mutex and deadlocks. 
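The cycle, condensed into a call sketch (only functions named above;
the erasure step between 'ubi_wl_get_peb()' and 'torture_peb()' is
abbreviated):

	recover_peb()
	    mutex_lock(&ubi->buf_mutex);
	    ubi_wl_get_peb(ubi, UBI_UNKNOWN);
	        /* may force erasure of a PEB, which may call: */
	        torture_peb()
	            mutex_lock(&ubi->buf_mutex);  /* already held: deadlock */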
Signed-off-by: Artem Bityutskiy
---
 drivers/mtd/ubi/eba.c | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)
(limited to 'drivers/mtd')

diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
index d8966bae0e0..2e4d6bf9458 100644
--- a/drivers/mtd/ubi/eba.c
+++ b/drivers/mtd/ubi/eba.c
@@ -504,12 +504,9 @@ static int recover_peb(struct ubi_device *ubi, int pnum, int vol_id, int lnum,
 	if (!vid_hdr)
 		return -ENOMEM;

-	mutex_lock(&ubi->buf_mutex);
-
retry:
 	new_pnum = ubi_wl_get_peb(ubi, UBI_UNKNOWN);
 	if (new_pnum < 0) {
-		mutex_unlock(&ubi->buf_mutex);
 		ubi_free_vid_hdr(ubi, vid_hdr);
 		return new_pnum;
 	}
@@ -529,20 +526,23 @@ retry:
 		goto write_error;

 	data_size = offset + len;
+	mutex_lock(&ubi->buf_mutex);
 	memset(ubi->peb_buf1 + offset, 0xFF, len);

 	/* Read everything before the area where the write failure happened */
 	if (offset > 0) {
 		err = ubi_io_read_data(ubi, ubi->peb_buf1, pnum, 0, offset);
 		if (err && err != UBI_IO_BITFLIPS)
-			goto out_put;
+			goto out_unlock;
 	}

 	memcpy(ubi->peb_buf1 + offset, buf, len);

 	err = ubi_io_write_data(ubi, ubi->peb_buf1, new_pnum, 0, data_size);
-	if (err)
+	if (err) {
+		mutex_unlock(&ubi->buf_mutex);
 		goto write_error;
+	}

 	mutex_unlock(&ubi->buf_mutex);
 	ubi_free_vid_hdr(ubi, vid_hdr);
@@ -553,8 +553,9 @@ retry:
 	ubi_msg("data was successfully recovered");
 	return 0;

-out_put:
+out_unlock:
 	mutex_unlock(&ubi->buf_mutex);
+out_put:
 	ubi_wl_put_peb(ubi, new_pnum, 1);
 	ubi_free_vid_hdr(ubi, vid_hdr);
 	return err;
@@ -567,7 +568,6 @@ write_error:
 	ubi_warn("failed to write to PEB %d", new_pnum);
 	ubi_wl_put_peb(ubi, new_pnum, 1);
 	if (++tries > UBI_IO_RETRIES) {
-		mutex_unlock(&ubi->buf_mutex);
 		ubi_free_vid_hdr(ubi, vid_hdr);
 		return err;
 	}
-- cgit

From 6a8f483f33a150a0269ad4612621eb6c245eb2cf Mon Sep 17 00:00:00 2001
From: Artem Bityutskiy
Date: Fri, 5 Dec 2008 12:23:48 +0200
Subject: UBI: some code re-structuring

Minor code re-structuring and commentary fixes to improve readability.

Signed-off-by: Artem Bityutskiy
---
 drivers/mtd/ubi/wl.c | 39 +++++++++++++++++++++++----------------
 1 file changed, 23 insertions(+), 16 deletions(-)
(limited to 'drivers/mtd')

diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index dcb6dac1dc5..667f5f451c2 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -359,19 +359,18 @@ static int in_wl_tree(struct ubi_wl_entry *e, struct rb_root *root)
  * @ubi: UBI device description object
  * @e: the physical eraseblock to add
  * @pe: protection entry object to use
- * @abs_ec: absolute erase counter value when this physical eraseblock has
- * to be removed from the protection trees.
+ * @ec: for how many erase operations this PEB should be protected
  *
  * @wl->lock has to be locked.
*/ static void prot_tree_add(struct ubi_device *ubi, struct ubi_wl_entry *e, - struct ubi_wl_prot_entry *pe, int abs_ec) + struct ubi_wl_prot_entry *pe, int ec) { struct rb_node **p, *parent = NULL; struct ubi_wl_prot_entry *pe1; pe->e = e; - pe->abs_ec = ubi->abs_ec + abs_ec; + pe->abs_ec = ubi->abs_ec + ec; p = &ubi->prot.pnum.rb_node; while (*p) { @@ -739,7 +738,7 @@ static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e, static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, int cancel) { - int err, put = 0, scrubbing = 0, protect = 0; + int err, put = 0, scrubbing = 0; struct ubi_wl_prot_entry *uninitialized_var(pe); struct ubi_wl_entry *e1, *e2; struct ubi_vid_hdr *vid_hdr; @@ -864,17 +863,28 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, goto out_error; } - protect = 1; + ubi_free_vid_hdr(ubi, vid_hdr); + spin_lock(&ubi->wl_lock); + prot_tree_add(ubi, e1, pe, U_PROTECTION); + ubi_assert(!ubi->move_to_put); + ubi->move_from = ubi->move_to = NULL; + ubi->wl_scheduled = 0; + spin_unlock(&ubi->wl_lock); + + err = schedule_erase(ubi, e2, 0); + if (err) + goto out_error; + mutex_unlock(&ubi->move_mutex); + return 0; } + /* The PEB has been successfully moved */ ubi_free_vid_hdr(ubi, vid_hdr); - if (scrubbing && !protect) + if (scrubbing) ubi_msg("scrubbed PEB %d, data moved to PEB %d", e1->pnum, e2->pnum); spin_lock(&ubi->wl_lock); - if (protect) - prot_tree_add(ubi, e1, pe, protect); if (!ubi->move_to_put) wl_tree_add(e2, &ubi->used); else @@ -883,6 +893,10 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, ubi->move_to_put = ubi->wl_scheduled = 0; spin_unlock(&ubi->wl_lock); + err = schedule_erase(ubi, e1, 0); + if (err) + goto out_error; + if (put) { /* * Well, the target PEB was put meanwhile, schedule it for @@ -894,13 +908,6 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, goto out_error; } - if (!protect) { - err = schedule_erase(ubi, e1, 0); - if (err) - goto out_error; - } - - dbg_wl("done"); mutex_unlock(&ubi->move_mutex); return 0; -- cgit From 3c98b0a043f25fa44b289c2f35b9d6ad1d859ac9 Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Fri, 5 Dec 2008 12:42:45 +0200 Subject: UBI: fix error path Make sure the resources had not already been freed before freeing them in the error path of the WL worker function. 
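The idiom used throughout the patch, sketched: a pointer is cleared as
soon as its object is freed or handed off, so the shared error path
frees only what is still owned:

	ubi_free_vid_hdr(ubi, vid_hdr);
	vid_hdr = NULL;			/* freed - do not free again */
	...
out_error:
	if (e1)
		kmem_cache_free(ubi_wl_entry_slab, e1);
	if (e2)
		kmem_cache_free(ubi_wl_entry_slab, e2);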
Signed-off-by: Artem Bityutskiy --- drivers/mtd/ubi/wl.c | 27 ++++++++++++++++++--------- 1 file changed, 18 insertions(+), 9 deletions(-) (limited to 'drivers/mtd') diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c index 667f5f451c2..442099d76ec 100644 --- a/drivers/mtd/ubi/wl.c +++ b/drivers/mtd/ubi/wl.c @@ -738,13 +738,12 @@ static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e, static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, int cancel) { - int err, put = 0, scrubbing = 0; + int err, scrubbing = 0; struct ubi_wl_prot_entry *uninitialized_var(pe); struct ubi_wl_entry *e1, *e2; struct ubi_vid_hdr *vid_hdr; kfree(wrk); - if (cancel) return 0; @@ -864,6 +863,8 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, } ubi_free_vid_hdr(ubi, vid_hdr); + vid_hdr = NULL; + spin_lock(&ubi->wl_lock); prot_tree_add(ubi, e1, pe, U_PROTECTION); ubi_assert(!ubi->move_to_put); @@ -871,6 +872,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, ubi->wl_scheduled = 0; spin_unlock(&ubi->wl_lock); + e1 = NULL; err = schedule_erase(ubi, e2, 0); if (err) goto out_error; @@ -880,24 +882,27 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, /* The PEB has been successfully moved */ ubi_free_vid_hdr(ubi, vid_hdr); + vid_hdr = NULL; if (scrubbing) ubi_msg("scrubbed PEB %d, data moved to PEB %d", e1->pnum, e2->pnum); spin_lock(&ubi->wl_lock); - if (!ubi->move_to_put) + if (!ubi->move_to_put) { wl_tree_add(e2, &ubi->used); - else - put = 1; + e2 = NULL; + } ubi->move_from = ubi->move_to = NULL; ubi->move_to_put = ubi->wl_scheduled = 0; spin_unlock(&ubi->wl_lock); err = schedule_erase(ubi, e1, 0); - if (err) + if (err) { + e1 = NULL; goto out_error; + } - if (put) { + if (e2) { /* * Well, the target PEB was put meanwhile, schedule it for * erasure. @@ -919,6 +924,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, */ out_not_moved: ubi_free_vid_hdr(ubi, vid_hdr); + vid_hdr = NULL; spin_lock(&ubi->wl_lock); if (scrubbing) wl_tree_add(e1, &ubi->scrub); @@ -928,6 +934,7 @@ out_not_moved: ubi->move_to_put = ubi->wl_scheduled = 0; spin_unlock(&ubi->wl_lock); + e1 = NULL; err = schedule_erase(ubi, e2, 0); if (err) goto out_error; @@ -945,8 +952,10 @@ out_error: ubi->move_to_put = ubi->wl_scheduled = 0; spin_unlock(&ubi->wl_lock); - kmem_cache_free(ubi_wl_entry_slab, e1); - kmem_cache_free(ubi_wl_entry_slab, e2); + if (e1) + kmem_cache_free(ubi_wl_entry_slab, e1); + if (e2) + kmem_cache_free(ubi_wl_entry_slab, e2); ubi_ro_mode(ubi); mutex_unlock(&ubi->move_mutex); -- cgit From 6fa6f5bbc3a2ad833a3d4b798140602004f70f5a Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Fri, 5 Dec 2008 13:37:02 +0200 Subject: UBI: handle write errors in WL worker When a PEB is moved and a write error happens, UBI switches to R/O mode, which is wrong, because we just copy the data and may select a different PEB and re-try this. This patch fixes WL worker's behavior. 
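Sketched from the caller's side, the WL worker now dispatches on the
richer return codes of 'ubi_eba_copy_leb()' (the complete version is
in the wl.c hunk below):

	err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vid_hdr);
	if (err) {
		if (err == -EAGAIN)	/* bit-flip in the target, retry later */
			goto out_not_moved;
		if (err < 0)		/* real failure */
			goto out_error;
		if (err == 2) {		/* write error: torture the target PEB */
			torture = 1;
			goto out_not_moved;
		}
		/* err == 1: volume deleted or PEB put; protect e1 and cancel */
	}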
Signed-off-by: Artem Bityutskiy --- drivers/mtd/ubi/eba.c | 37 ++++++++++++++++++++++++------------- drivers/mtd/ubi/wl.c | 32 ++++++++++++++++++++------------ 2 files changed, 44 insertions(+), 25 deletions(-) (limited to 'drivers/mtd') diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c index 2e4d6bf9458..048a606cebd 100644 --- a/drivers/mtd/ubi/eba.c +++ b/drivers/mtd/ubi/eba.c @@ -949,10 +949,14 @@ write_error: * This function copies logical eraseblock from physical eraseblock @from to * physical eraseblock @to. The @vid_hdr buffer may be changed by this * function. Returns: - * o %0 in case of success; - * o %1 if the operation was canceled and should be tried later (e.g., - * because a bit-flip was detected at the target PEB); - * o %2 if the volume is being deleted and this LEB should not be moved. + * o %0 in case of success; + * o %1 if the operation was canceled because the volume is being deleted + * or because the PEB was put meanwhile; + * o %2 if the operation was canceled because there was a write error to the + * target PEB; + * o %-EAGAIN if the operation was canceled because a bit-flip was detected + * in the target PEB; + * o a negative error code in case of failure. */ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to, struct ubi_vid_hdr *vid_hdr) @@ -978,7 +982,7 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to, /* * Note, we may race with volume deletion, which means that the volume * this logical eraseblock belongs to might be being deleted. Since the - * volume deletion unmaps all the volume's logical eraseblocks, it will + * volume deletion un-maps all the volume's logical eraseblocks, it will * be locked in 'ubi_wl_put_peb()' and wait for the WL worker to finish. */ vol = ubi->volumes[idx]; @@ -986,7 +990,7 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to, /* No need to do further work, cancel */ dbg_eba("volume %d is being removed, cancel", vol_id); spin_unlock(&ubi->volumes_lock); - return 2; + return 1; } spin_unlock(&ubi->volumes_lock); @@ -1023,7 +1027,7 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to, /* * OK, now the LEB is locked and we can safely start moving it. Since - * this function utilizes thie @ubi->peb1_buf buffer which is shared + * this function utilizes the @ubi->peb1_buf buffer which is shared * with some other functions, so lock the buffer by taking the * @ubi->buf_mutex. 
*/ @@ -1068,8 +1072,11 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to, vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi)); err = ubi_io_write_vid_hdr(ubi, to, vid_hdr); - if (err) + if (err) { + if (err == -EIO) + err = 2; goto out_unlock_buf; + } cond_resched(); @@ -1079,14 +1086,17 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to, if (err != UBI_IO_BITFLIPS) ubi_warn("cannot read VID header back from PEB %d", to); else - err = 1; + err = -EAGAIN; goto out_unlock_buf; } if (data_size > 0) { err = ubi_io_write_data(ubi, ubi->peb_buf1, to, 0, aldata_size); - if (err) + if (err) { + if (err == -EIO) + err = 2; goto out_unlock_buf; + } cond_resched(); @@ -1101,15 +1111,16 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to, ubi_warn("cannot read data back from PEB %d", to); else - err = 1; + err = -EAGAIN; goto out_unlock_buf; } cond_resched(); if (memcmp(ubi->peb_buf1, ubi->peb_buf2, aldata_size)) { - ubi_warn("read data back from PEB %d - it is different", - to); + ubi_warn("read data back from PEB %d and it is " + "different", to); + err = -EINVAL; goto out_unlock_buf; } } diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c index 442099d76ec..abf65ea414e 100644 --- a/drivers/mtd/ubi/wl.c +++ b/drivers/mtd/ubi/wl.c @@ -738,7 +738,7 @@ static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e, static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, int cancel) { - int err, scrubbing = 0; + int err, scrubbing = 0, torture = 0; struct ubi_wl_prot_entry *uninitialized_var(pe); struct ubi_wl_entry *e1, *e2; struct ubi_vid_hdr *vid_hdr; @@ -842,20 +842,26 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vid_hdr); if (err) { - + if (err == -EAGAIN) + goto out_not_moved; if (err < 0) goto out_error; - if (err == 1) + if (err == 2) { + /* Target PEB write error, torture it */ + torture = 1; goto out_not_moved; + } /* - * For some reason the LEB was not moved - it might be because - * the volume is being deleted. We should prevent this PEB from - * being selected for wear-levelling movement for some "time", - * so put it to the protection tree. + * The LEB has not been moved because the volume is being + * deleted or the PEB has been put meanwhile. We should prevent + * this PEB from being selected for wear-leveling movement + * again, so put it to the protection tree. */ - dbg_wl("cancelled moving PEB %d", e1->pnum); + dbg_wl("canceled moving PEB %d", e1->pnum); + ubi_assert(err == 1); + pe = kmalloc(sizeof(struct ubi_wl_prot_entry), GFP_NOFS); if (!pe) { err = -ENOMEM; @@ -920,9 +926,10 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, /* * For some reasons the LEB was not moved, might be an error, might be * something else. @e1 was not changed, so return it back. @e2 might - * be changed, schedule it for erasure. + * have been changed, schedule it for erasure. 
*/ out_not_moved: + dbg_wl("canceled moving PEB %d", e1->pnum); ubi_free_vid_hdr(ubi, vid_hdr); vid_hdr = NULL; spin_lock(&ubi->wl_lock); @@ -930,12 +937,13 @@ out_not_moved: wl_tree_add(e1, &ubi->scrub); else wl_tree_add(e1, &ubi->used); + ubi_assert(!ubi->move_to_put); ubi->move_from = ubi->move_to = NULL; - ubi->move_to_put = ubi->wl_scheduled = 0; + ubi->wl_scheduled = 0; spin_unlock(&ubi->wl_lock); e1 = NULL; - err = schedule_erase(ubi, e2, 0); + err = schedule_erase(ubi, e2, torture); if (err) goto out_error; @@ -1324,7 +1332,7 @@ int ubi_wl_flush(struct ubi_device *ubi) up_write(&ubi->work_sem); /* - * And in case last was the WL worker and it cancelled the LEB + * And in case last was the WL worker and it canceled the LEB * movement, flush again. */ while (ubi->works_count) { -- cgit From ad5942bad6addcf9697a74413b517d9724d803a4 Mon Sep 17 00:00:00 2001 From: Stefan Roese Date: Wed, 10 Dec 2008 10:42:54 +0100 Subject: UBI: return -ENOMEM upon failing vmalloc Return with correct error code (-ENOMEM) from ubi_attach_mtd_dev() upon failing vmalloc(). Signed-off-by: Stefan Roese Signed-off-by: Artem Bityutskiy --- drivers/mtd/ubi/build.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'drivers/mtd') diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c index c7630a22831..ba0bd3d5775 100644 --- a/drivers/mtd/ubi/build.c +++ b/drivers/mtd/ubi/build.c @@ -815,19 +815,20 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset) if (err) goto out_free; + err = -ENOMEM; ubi->peb_buf1 = vmalloc(ubi->peb_size); if (!ubi->peb_buf1) goto out_free; ubi->peb_buf2 = vmalloc(ubi->peb_size); if (!ubi->peb_buf2) - goto out_free; + goto out_free; #ifdef CONFIG_MTD_UBI_DEBUG mutex_init(&ubi->dbg_buf_mutex); ubi->dbg_peb_buf = vmalloc(ubi->peb_size); if (!ubi->dbg_peb_buf) - goto out_free; + goto out_free; #endif err = attach_by_scanning(ubi); -- cgit From 0b84b5ca43a9c86cfad848c135fdbf7c72af68fa Mon Sep 17 00:00:00 2001 From: David Brownell Date: Wed, 10 Dec 2008 17:35:25 -0800 Subject: ARM: OMAP: switch to standard gpio get/set calls This patch replaces some legacy OMAP GPIO calls with the "new" (not really, any more!) calls that work on most platforms. The calls addressed by this patch are the simple ones to get and set values ... for code that's in mainline, including the implementations of those calls. Except for the declarations and definitions of those calls, all of these changes were performed by a simple SED script. Plus, a few "if() set() else set()" branches were merged by hand. Signed-off-by: David Brownell Signed-off-by: Tony Lindgren --- drivers/mtd/onenand/omap2.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/mtd') diff --git a/drivers/mtd/onenand/omap2.c b/drivers/mtd/onenand/omap2.c index a7e4d985f5e..cf1501d26a3 100644 --- a/drivers/mtd/onenand/omap2.c +++ b/drivers/mtd/onenand/omap2.c @@ -149,7 +149,7 @@ static int omap2_onenand_wait(struct mtd_info *mtd, int state) INIT_COMPLETION(c->irq_done); if (c->gpio_irq) { - result = omap_get_gpio_datain(c->gpio_irq); + result = gpio_get_value(c->gpio_irq); if (result == -1) { ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS); intr = read_reg(c, ONENAND_REG_INTERRUPT); -- cgit From 40e3925ba15b604c9ff87154d77a914221d11cdc Mon Sep 17 00:00:00 2001 From: David Brownell Date: Wed, 10 Dec 2008 17:35:26 -0800 Subject: ARM: OMAP: switch to gpio_direction_input More switchover to the cross-platform GPIO interface: use gpio_direction_input(), not an OMAP-specific call. 
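The generic calls, sketched side by side with the OMAP-private ones
they replace (the GPIO number is a placeholder):

	gpio_direction_input(gpio);	/* was: omap_set_gpio_direction(gpio, 1) */
	value = gpio_get_value(gpio);	/* was: omap_get_gpio_datain(gpio) */
	gpio_set_value(gpio, 1);	/* was: omap_set_gpio_dataout(gpio, 1) */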
Signed-off-by: David Brownell Signed-off-by: Tony Lindgren --- drivers/mtd/onenand/omap2.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/mtd') diff --git a/drivers/mtd/onenand/omap2.c b/drivers/mtd/onenand/omap2.c index cf1501d26a3..197d8e554cc 100644 --- a/drivers/mtd/onenand/omap2.c +++ b/drivers/mtd/onenand/omap2.c @@ -634,7 +634,7 @@ static int __devinit omap2_onenand_probe(struct platform_device *pdev) "OneNAND\n", c->gpio_irq); goto err_iounmap; } - omap_set_gpio_direction(c->gpio_irq, 1); + gpio_direction_input(c->gpio_irq); if ((r = request_irq(OMAP_GPIO_IRQ(c->gpio_irq), omap2_onenand_interrupt, IRQF_TRIGGER_RISING, -- cgit From 15f74b0335962e8554c91e52d588dc9f8ee7098d Mon Sep 17 00:00:00 2001 From: David Brownell Date: Wed, 10 Dec 2008 17:35:26 -0800 Subject: ARM: OMAP: use gpio_to_irq Have most uses of OMAP_GPIO_IRQ() use gpio_to_irq() instead. Calls used for table initialization are left alone, at least this time around. (This patch is for code in both the OMAP tree and mainline.) Signed-off-by: David Brownell Signed-off-by: Tony Lindgren --- drivers/mtd/onenand/omap2.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers/mtd') diff --git a/drivers/mtd/onenand/omap2.c b/drivers/mtd/onenand/omap2.c index 197d8e554cc..d1e0b8e7224 100644 --- a/drivers/mtd/onenand/omap2.c +++ b/drivers/mtd/onenand/omap2.c @@ -636,7 +636,7 @@ static int __devinit omap2_onenand_probe(struct platform_device *pdev) } gpio_direction_input(c->gpio_irq); - if ((r = request_irq(OMAP_GPIO_IRQ(c->gpio_irq), + if ((r = request_irq(gpio_to_irq(c->gpio_irq), omap2_onenand_interrupt, IRQF_TRIGGER_RISING, pdev->dev.driver->name, c)) < 0) goto err_release_gpio; @@ -723,7 +723,7 @@ err_release_dma: if (c->dma_channel != -1) omap_free_dma(c->dma_channel); if (c->gpio_irq) - free_irq(OMAP_GPIO_IRQ(c->gpio_irq), c); + free_irq(gpio_to_irq(c->gpio_irq), c); err_release_gpio: if (c->gpio_irq) omap_free_gpio(c->gpio_irq); @@ -760,7 +760,7 @@ static int __devexit omap2_onenand_remove(struct platform_device *pdev) omap2_onenand_shutdown(pdev); platform_set_drvdata(pdev, NULL); if (c->gpio_irq) { - free_irq(OMAP_GPIO_IRQ(c->gpio_irq), c); + free_irq(gpio_to_irq(c->gpio_irq), c); omap_free_gpio(c->gpio_irq); } iounmap(c->onenand.base); -- cgit From 70d13e083c8589dd3edc2313777655da39cb3568 Mon Sep 17 00:00:00 2001 From: Russell King Date: Sat, 6 Dec 2008 08:25:16 +0000 Subject: [ARM] netwinder: clean up GPIO naming Netwinder was using gpio_xxx names which could clash with the GPIO layer. Add a 'nw_' prefix to ensure that these remain separate. Signed-off-by: Russell King --- drivers/mtd/maps/dc21285.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) (limited to 'drivers/mtd') diff --git a/drivers/mtd/maps/dc21285.c b/drivers/mtd/maps/dc21285.c index 3aa018c092f..42969fe051b 100644 --- a/drivers/mtd/maps/dc21285.c +++ b/drivers/mtd/maps/dc21285.c @@ -32,16 +32,15 @@ static struct mtd_info *dc21285_mtd; */ static void nw_en_write(void) { - extern spinlock_t gpio_lock; unsigned long flags; /* * we want to write a bit pattern XXX1 to Xilinx to enable * the write gate, which will be open for about the next 2ms. */ - spin_lock_irqsave(&gpio_lock, flags); - cpld_modify(1, 1); - spin_unlock_irqrestore(&gpio_lock, flags); + spin_lock_irqsave(&nw_gpio_lock, flags); + nw_cpld_modify(CPLD_FLASH_WR_ENABLE, CPLD_FLASH_WR_ENABLE); + spin_unlock_irqrestore(&nw_gpio_lock, flags); /* * let the ISA bus to catch on... 
-- cgit From 23553b2c08c9b6e96be98c44feb9c5e640d3e789 Mon Sep 17 00:00:00 2001 From: Xiaochuan-Xu Date: Tue, 9 Dec 2008 19:44:12 +0800 Subject: UBI: prepare for protection tree improvements This patch modifies @struct ubi_wl_entry and adds union which contains only one element so far. This is just a preparation for further changes which will kill the protection tree and make UBI use a list instead. Signed-off-by: Xiaochuan-Xu Signed-off-by: Artem Bityutskiy --- drivers/mtd/ubi/ubi.h | 6 ++++-- drivers/mtd/ubi/wl.c | 45 +++++++++++++++++++++++---------------------- 2 files changed, 27 insertions(+), 24 deletions(-) (limited to 'drivers/mtd') diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h index 1c3fa18c26a..46a4763f8e7 100644 --- a/drivers/mtd/ubi/ubi.h +++ b/drivers/mtd/ubi/ubi.h @@ -95,7 +95,7 @@ enum { /** * struct ubi_wl_entry - wear-leveling entry. - * @rb: link in the corresponding RB-tree + * @u.rb: link in the corresponding (free/used) RB-tree * @ec: erase counter * @pnum: physical eraseblock number * @@ -104,7 +104,9 @@ enum { * RB-trees. See WL sub-system for details. */ struct ubi_wl_entry { - struct rb_node rb; + union { + struct rb_node rb; + } u; int ec; int pnum; }; diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c index abf65ea414e..0279bf9dc72 100644 --- a/drivers/mtd/ubi/wl.c +++ b/drivers/mtd/ubi/wl.c @@ -220,7 +220,7 @@ static void wl_tree_add(struct ubi_wl_entry *e, struct rb_root *root) struct ubi_wl_entry *e1; parent = *p; - e1 = rb_entry(parent, struct ubi_wl_entry, rb); + e1 = rb_entry(parent, struct ubi_wl_entry, u.rb); if (e->ec < e1->ec) p = &(*p)->rb_left; @@ -235,8 +235,8 @@ static void wl_tree_add(struct ubi_wl_entry *e, struct rb_root *root) } } - rb_link_node(&e->rb, parent, p); - rb_insert_color(&e->rb, root); + rb_link_node(&e->u.rb, parent, p); + rb_insert_color(&e->u.rb, root); } /** @@ -331,7 +331,7 @@ static int in_wl_tree(struct ubi_wl_entry *e, struct rb_root *root) while (p) { struct ubi_wl_entry *e1; - e1 = rb_entry(p, struct ubi_wl_entry, rb); + e1 = rb_entry(p, struct ubi_wl_entry, u.rb); if (e->pnum == e1->pnum) { ubi_assert(e == e1); @@ -413,14 +413,14 @@ static struct ubi_wl_entry *find_wl_entry(struct rb_root *root, int max) struct rb_node *p; struct ubi_wl_entry *e; - e = rb_entry(rb_first(root), struct ubi_wl_entry, rb); + e = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb); max += e->ec; p = root->rb_node; while (p) { struct ubi_wl_entry *e1; - e1 = rb_entry(p, struct ubi_wl_entry, rb); + e1 = rb_entry(p, struct ubi_wl_entry, u.rb); if (e1->ec >= max) p = p->rb_left; else { @@ -491,12 +491,13 @@ retry: * eraseblock with erase counter greater or equivalent than the * lowest erase counter plus %WL_FREE_MAX_DIFF. */ - first = rb_entry(rb_first(&ubi->free), struct ubi_wl_entry, rb); - last = rb_entry(rb_last(&ubi->free), struct ubi_wl_entry, rb); + first = rb_entry(rb_first(&ubi->free), struct ubi_wl_entry, + u.rb); + last = rb_entry(rb_last(&ubi->free), struct ubi_wl_entry, u.rb); if (last->ec - first->ec < WL_FREE_MAX_DIFF) e = rb_entry(ubi->free.rb_node, - struct ubi_wl_entry, rb); + struct ubi_wl_entry, u.rb); else { medium_ec = (first->ec + WL_FREE_MAX_DIFF)/2; e = find_wl_entry(&ubi->free, medium_ec); @@ -508,7 +509,7 @@ retry: * For short term data we pick a physical eraseblock with the * lowest erase counter as we expect it will be erased soon. 
 */
-		e = rb_entry(rb_first(&ubi->free), struct ubi_wl_entry, rb);
+		e = rb_entry(rb_first(&ubi->free), struct ubi_wl_entry, u.rb);
 		protect = ST_PROTECTION;
 		break;
 	default:
@@ -522,7 +523,7 @@ retry:
 	 * be protected from being moved for some time.
 	 */
 	paranoid_check_in_wl_tree(e, &ubi->free);
-	rb_erase(&e->rb, &ubi->free);
+	rb_erase(&e->u.rb, &ubi->free);
 	prot_tree_add(ubi, e, pe, protect);

 	dbg_wl("PEB %d EC %d, protection %d", e->pnum, e->ec, protect);
@@ -779,7 +780,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
 		 * highly worn-out free physical eraseblock. If the erase
 		 * counters differ much enough, start wear-leveling.
 		 */
-		e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, rb);
+		e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
 		e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);

 		if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) {
@@ -788,21 +789,21 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
 			goto out_cancel;
 		}
 		paranoid_check_in_wl_tree(e1, &ubi->used);
-		rb_erase(&e1->rb, &ubi->used);
+		rb_erase(&e1->u.rb, &ubi->used);
 		dbg_wl("move PEB %d EC %d to PEB %d EC %d",
 		       e1->pnum, e1->ec, e2->pnum, e2->ec);
 	} else {
 		/* Perform scrubbing */
 		scrubbing = 1;
-		e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, rb);
+		e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, u.rb);
 		e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
 		paranoid_check_in_wl_tree(e1, &ubi->scrub);
-		rb_erase(&e1->rb, &ubi->scrub);
+		rb_erase(&e1->u.rb, &ubi->scrub);
 		dbg_wl("scrub PEB %d to PEB %d", e1->pnum, e2->pnum);
 	}

 	paranoid_check_in_wl_tree(e2, &ubi->free);
-	rb_erase(&e2->rb, &ubi->free);
+	rb_erase(&e2->u.rb, &ubi->free);
 	ubi->move_from = e1;
 	ubi->move_to = e2;
 	spin_unlock(&ubi->wl_lock);
@@ -1012,7 +1013,7 @@ static int ensure_wear_leveling(struct ubi_device *ubi)
 	 * erase counter of free physical eraseblocks is greater then
 	 * %UBI_WL_THRESHOLD.
 	 */
-	e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, rb);
+	e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
 	e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);

 	if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD))
@@ -1214,10 +1215,10 @@ retry:
 	} else {
 		if (in_wl_tree(e, &ubi->used)) {
 			paranoid_check_in_wl_tree(e, &ubi->used);
-			rb_erase(&e->rb, &ubi->used);
+			rb_erase(&e->u.rb, &ubi->used);
 		} else if (in_wl_tree(e, &ubi->scrub)) {
 			paranoid_check_in_wl_tree(e, &ubi->scrub);
-			rb_erase(&e->rb, &ubi->scrub);
+			rb_erase(&e->u.rb, &ubi->scrub);
 		} else {
 			err = prot_tree_del(ubi, e->pnum);
 			if (err) {
@@ -1279,7 +1280,7 @@ retry:

 	if (in_wl_tree(e, &ubi->used)) {
 		paranoid_check_in_wl_tree(e, &ubi->used);
-		rb_erase(&e->rb, &ubi->used);
+		rb_erase(&e->u.rb, &ubi->used);
 	} else {
 		int err;

@@ -1361,11 +1362,11 @@ static void tree_destroy(struct rb_root *root)
 		else if (rb->rb_right)
 			rb = rb->rb_right;
 		else {
-			e = rb_entry(rb, struct ubi_wl_entry, rb);
+			e = rb_entry(rb, struct ubi_wl_entry, u.rb);

 			rb = rb_parent(rb);
 			if (rb) {
-				if (rb->rb_left == &e->rb)
+				if (rb->rb_left == &e->u.rb)
 					rb->rb_left = NULL;
 				else
 					rb->rb_right = NULL;
-- cgit

From 7b6c32daec3bff380ced6822002bc352bdf2c982 Mon Sep 17 00:00:00 2001
From: Xiaochuan-Xu
Date: Mon, 15 Dec 2008 21:07:41 +0800
Subject: UBI: simplify PEB protection code

UBI has 2 RB-trees to implement PEB protection, which is too much for
simply preventing a PEB from being moved for some time. This patch
implements the protection using lists instead. The benefits:

1. No need to allocate a protection entry on each PEB get.
2.
No need to maintain balanced trees and walk them. Signed-off-by: Xiaochuan-Xu Signed-off-by: Artem Bityutskiy --- drivers/mtd/ubi/ubi.h | 39 +++--- drivers/mtd/ubi/wl.c | 364 +++++++++++++++++++------------------------------- 2 files changed, 162 insertions(+), 241 deletions(-) (limited to 'drivers/mtd') diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h index 46a4763f8e7..4a8ec485c91 100644 --- a/drivers/mtd/ubi/ubi.h +++ b/drivers/mtd/ubi/ubi.h @@ -73,6 +73,13 @@ */ #define UBI_IO_RETRIES 3 +/* + * Length of the protection queue. The length is effectively equivalent to the + * number of (global) erase cycles PEBs are protected from the wear-leveling + * worker. + */ +#define UBI_PROT_QUEUE_LEN 10 + /* * Error codes returned by the I/O sub-system. * @@ -96,6 +103,7 @@ enum { /** * struct ubi_wl_entry - wear-leveling entry. * @u.rb: link in the corresponding (free/used) RB-tree + * @u.list: link in the protection queue * @ec: erase counter * @pnum: physical eraseblock number * @@ -106,6 +114,7 @@ enum { struct ubi_wl_entry { union { struct rb_node rb; + struct list_head list; } u; int ec; int pnum; @@ -290,7 +299,7 @@ struct ubi_wl_entry; * @beb_rsvd_level: normal level of PEBs reserved for bad PEB handling * * @autoresize_vol_id: ID of the volume which has to be auto-resized at the end - * of UBI ititializetion + * of UBI initialization * @vtbl_slots: how many slots are available in the volume table * @vtbl_size: size of the volume table in bytes * @vtbl: in-RAM volume table copy @@ -308,18 +317,17 @@ struct ubi_wl_entry; * @used: RB-tree of used physical eraseblocks * @free: RB-tree of free physical eraseblocks * @scrub: RB-tree of physical eraseblocks which need scrubbing - * @prot: protection trees - * @prot.pnum: protection tree indexed by physical eraseblock numbers - * @prot.aec: protection tree indexed by absolute erase counter value - * @wl_lock: protects the @used, @free, @prot, @lookuptbl, @abs_ec, @move_from, - * @move_to, @move_to_put @erase_pending, @wl_scheduled, and @works - * fields + * @pq: protection queue (contain physical eraseblocks which are temporarily + * protected from the wear-leveling worker) + * @pq_head: protection queue head + * @wl_lock: protects the @used, @free, @pq, @pq_head, @lookuptbl, @move_from, + * @move_to, @move_to_put @erase_pending, @wl_scheduled and @works + * fields * @move_mutex: serializes eraseblock moves - * @work_sem: sycnhronizes the WL worker with use tasks + * @work_sem: synchronizes the WL worker with use tasks * @wl_scheduled: non-zero if the wear-leveling was scheduled * @lookuptbl: a table to quickly find a &struct ubi_wl_entry object for any * physical eraseblock - * @abs_ec: absolute erase counter * @move_from: physical eraseblock from where the data is being moved * @move_to: physical eraseblock where the data is being moved to * @move_to_put: if the "to" PEB was put @@ -353,11 +361,11 @@ struct ubi_wl_entry; * * @peb_buf1: a buffer of PEB size used for different purposes * @peb_buf2: another buffer of PEB size used for different purposes - * @buf_mutex: proptects @peb_buf1 and @peb_buf2 + * @buf_mutex: protects @peb_buf1 and @peb_buf2 * @ckvol_mutex: serializes static volume checking when opening - * @mult_mutex: serializes operations on multiple volumes, like re-nameing + * @mult_mutex: serializes operations on multiple volumes, like re-naming * @dbg_peb_buf: buffer of PEB size used for debugging - * @dbg_buf_mutex: proptects @dbg_peb_buf + * @dbg_buf_mutex: protects @dbg_peb_buf */ struct ubi_device { struct cdev cdev; 
@@ -394,16 +402,13 @@ struct ubi_device { struct rb_root used; struct rb_root free; struct rb_root scrub; - struct { - struct rb_root pnum; - struct rb_root aec; - } prot; + struct list_head pq[UBI_PROT_QUEUE_LEN]; + int pq_head; spinlock_t wl_lock; struct mutex move_mutex; struct rw_semaphore work_sem; int wl_scheduled; struct ubi_wl_entry **lookuptbl; - unsigned long long abs_ec; struct ubi_wl_entry *move_from; struct ubi_wl_entry *move_to; int move_to_put; diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c index 0279bf9dc72..14901cb82c1 100644 --- a/drivers/mtd/ubi/wl.c +++ b/drivers/mtd/ubi/wl.c @@ -22,7 +22,7 @@ * UBI wear-leveling sub-system. * * This sub-system is responsible for wear-leveling. It works in terms of - * physical* eraseblocks and erase counters and knows nothing about logical + * physical eraseblocks and erase counters and knows nothing about logical * eraseblocks, volumes, etc. From this sub-system's perspective all physical * eraseblocks are of two types - used and free. Used physical eraseblocks are * those that were "get" by the 'ubi_wl_get_peb()' function, and free physical @@ -55,8 +55,39 @@ * * As it was said, for the UBI sub-system all physical eraseblocks are either * "free" or "used". Free eraseblock are kept in the @wl->free RB-tree, while - * used eraseblocks are kept in a set of different RB-trees: @wl->used, - * @wl->prot.pnum, @wl->prot.aec, and @wl->scrub. + * used eraseblocks are kept in @wl->used or @wl->scrub RB-trees, or + * (temporarily) in the @wl->pq queue. + * + * When the WL sub-system returns a physical eraseblock, the physical + * eraseblock is protected from being moved for some "time". For this reason, + * the physical eraseblock is not directly moved from the @wl->free tree to the + * @wl->used tree. There is a protection queue in between where this + * physical eraseblock is temporarily stored (@wl->pq). + * + * All this protection stuff is needed because: + * o we don't want to move physical eraseblocks just after we have given them + * to the user; instead, we first want to let users fill them up with data; + * + * o there is a chance that the user will put the physical eraseblock very + * soon, so it makes sense not to move it for some time, but wait; this is + * especially important in case of "short term" physical eraseblocks. + * + * Physical eraseblocks stay protected only for limited time. But the "time" is + * measured in erase cycles in this case. This is implemented with help of the + * protection queue. Eraseblocks are put to the tail of this queue when they + * are returned by the 'ubi_wl_get_peb()', and eraseblocks are removed from the + * head of the queue on each erase operation (for any eraseblock). So the + * length of the queue defines how may (global) erase cycles PEBs are protected. + * + * To put it differently, each physical eraseblock has 2 main states: free and + * used. The former state corresponds to the @wl->free tree. The latter state + * is split up on several sub-states: + * o the WL movement is allowed (@wl->used tree); + * o the WL movement is temporarily prohibited (@wl->pq queue); + * o scrubbing is needed (@wl->scrub tree). + * + * Depending on the sub-state, wear-leveling entries of the used physical + * eraseblocks may be kept in one of those structures. * * Note, in this implementation, we keep a small in-RAM object for each physical * eraseblock. This is surely not a scalable solution. 
But it appears to be good @@ -70,9 +101,6 @@ * target PEB, we pick a PEB with the highest EC if our PEB is "old" and we * pick target PEB with an average EC if our PEB is not very "old". This is a * room for future re-works of the WL sub-system. - * - * Note: the stuff with protection trees looks too complex and is difficult to - * understand. Should be fixed. */ #include @@ -84,14 +112,6 @@ /* Number of physical eraseblocks reserved for wear-leveling purposes */ #define WL_RESERVED_PEBS 1 -/* - * How many erase cycles are short term, unknown, and long term physical - * eraseblocks protected. - */ -#define ST_PROTECTION 16 -#define U_PROTECTION 10 -#define LT_PROTECTION 4 - /* * Maximum difference between two erase counters. If this threshold is * exceeded, the WL sub-system starts moving data from used physical @@ -119,65 +139,10 @@ */ #define WL_MAX_FAILURES 32 -/** - * struct ubi_wl_prot_entry - PEB protection entry. - * @rb_pnum: link in the @wl->prot.pnum RB-tree - * @rb_aec: link in the @wl->prot.aec RB-tree - * @abs_ec: the absolute erase counter value when the protection ends - * @e: the wear-leveling entry of the physical eraseblock under protection - * - * When the WL sub-system returns a physical eraseblock, the physical - * eraseblock is protected from being moved for some "time". For this reason, - * the physical eraseblock is not directly moved from the @wl->free tree to the - * @wl->used tree. There is one more tree in between where this physical - * eraseblock is temporarily stored (@wl->prot). - * - * All this protection stuff is needed because: - * o we don't want to move physical eraseblocks just after we have given them - * to the user; instead, we first want to let users fill them up with data; - * - * o there is a chance that the user will put the physical eraseblock very - * soon, so it makes sense not to move it for some time, but wait; this is - * especially important in case of "short term" physical eraseblocks. - * - * Physical eraseblocks stay protected only for limited time. But the "time" is - * measured in erase cycles in this case. This is implemented with help of the - * absolute erase counter (@wl->abs_ec). When it reaches certain value, the - * physical eraseblocks are moved from the protection trees (@wl->prot.*) to - * the @wl->used tree. - * - * Protected physical eraseblocks are searched by physical eraseblock number - * (when they are put) and by the absolute erase counter (to check if it is - * time to move them to the @wl->used tree). So there are actually 2 RB-trees - * storing the protected physical eraseblocks: @wl->prot.pnum and - * @wl->prot.aec. They are referred to as the "protection" trees. The - * first one is indexed by the physical eraseblock number. The second one is - * indexed by the absolute erase counter. Both trees store - * &struct ubi_wl_prot_entry objects. - * - * Each physical eraseblock has 2 main states: free and used. The former state - * corresponds to the @wl->free tree. The latter state is split up on several - * sub-states: - * o the WL movement is allowed (@wl->used tree); - * o the WL movement is temporarily prohibited (@wl->prot.pnum and - * @wl->prot.aec trees); - * o scrubbing is needed (@wl->scrub tree). - * - * Depending on the sub-state, wear-leveling entries of the used physical - * eraseblocks may be kept in one of those trees. 
- */ -struct ubi_wl_prot_entry { - struct rb_node rb_pnum; - struct rb_node rb_aec; - unsigned long long abs_ec; - struct ubi_wl_entry *e; -}; - /** * struct ubi_work - UBI work description data structure. * @list: a link in the list of pending works * @func: worker function - * @priv: private data of the worker function * @e: physical eraseblock to erase * @torture: if the physical eraseblock has to be tortured * @@ -198,9 +163,11 @@ struct ubi_work { static int paranoid_check_ec(struct ubi_device *ubi, int pnum, int ec); static int paranoid_check_in_wl_tree(struct ubi_wl_entry *e, struct rb_root *root); +static int paranoid_check_in_pq(struct ubi_device *ubi, struct ubi_wl_entry *e); #else #define paranoid_check_ec(ubi, pnum, ec) 0 #define paranoid_check_in_wl_tree(e, root) +#define paranoid_check_in_pq(ubi, e) 0 #endif /** @@ -355,49 +322,24 @@ static int in_wl_tree(struct ubi_wl_entry *e, struct rb_root *root) } /** - * prot_tree_add - add physical eraseblock to protection trees. + * prot_queue_add - add physical eraseblock to the protection queue. * @ubi: UBI device description object * @e: the physical eraseblock to add - * @pe: protection entry object to use - * @ec: for how many erase operations this PEB should be protected * - * @wl->lock has to be locked. + * This function adds @e to the tail of the protection queue @ubi->pq, where + * @e will stay for %UBI_PROT_QUEUE_LEN erase operations and will be + * temporarily protected from the wear-leveling worker. Note, @wl->lock has to + * be locked. */ -static void prot_tree_add(struct ubi_device *ubi, struct ubi_wl_entry *e, - struct ubi_wl_prot_entry *pe, int ec) +static void prot_queue_add(struct ubi_device *ubi, struct ubi_wl_entry *e) { - struct rb_node **p, *parent = NULL; - struct ubi_wl_prot_entry *pe1; + int pq_tail = ubi->pq_head - 1; - pe->e = e; - pe->abs_ec = ubi->abs_ec + ec; - - p = &ubi->prot.pnum.rb_node; - while (*p) { - parent = *p; - pe1 = rb_entry(parent, struct ubi_wl_prot_entry, rb_pnum); - - if (e->pnum < pe1->e->pnum) - p = &(*p)->rb_left; - else - p = &(*p)->rb_right; - } - rb_link_node(&pe->rb_pnum, parent, p); - rb_insert_color(&pe->rb_pnum, &ubi->prot.pnum); - - p = &ubi->prot.aec.rb_node; - parent = NULL; - while (*p) { - parent = *p; - pe1 = rb_entry(parent, struct ubi_wl_prot_entry, rb_aec); - - if (pe->abs_ec < pe1->abs_ec) - p = &(*p)->rb_left; - else - p = &(*p)->rb_right; - } - rb_link_node(&pe->rb_aec, parent, p); - rb_insert_color(&pe->rb_aec, &ubi->prot.aec); + if (pq_tail < 0) + pq_tail = UBI_PROT_QUEUE_LEN - 1; + ubi_assert(pq_tail >= 0 && pq_tail < UBI_PROT_QUEUE_LEN); + list_add_tail(&e->u.list, &ubi->pq[pq_tail]); + dbg_wl("added PEB %d EC %d to the protection queue", e->pnum, e->ec); } /** @@ -442,17 +384,12 @@ static struct ubi_wl_entry *find_wl_entry(struct rb_root *root, int max) */ int ubi_wl_get_peb(struct ubi_device *ubi, int dtype) { - int err, protect, medium_ec; + int err, medium_ec; struct ubi_wl_entry *e, *first, *last; - struct ubi_wl_prot_entry *pe; ubi_assert(dtype == UBI_LONGTERM || dtype == UBI_SHORTTERM || dtype == UBI_UNKNOWN); - pe = kmalloc(sizeof(struct ubi_wl_prot_entry), GFP_NOFS); - if (!pe) - return -ENOMEM; - retry: spin_lock(&ubi->wl_lock); if (!ubi->free.rb_node) { @@ -460,16 +397,13 @@ retry: ubi_assert(list_empty(&ubi->works)); ubi_err("no free eraseblocks"); spin_unlock(&ubi->wl_lock); - kfree(pe); return -ENOSPC; } spin_unlock(&ubi->wl_lock); err = produce_free_peb(ubi); - if (err < 0) { - kfree(pe); + if (err < 0) return err; - } goto retry; } @@ -482,7 
+416,6 @@ retry: * %WL_FREE_MAX_DIFF. */ e = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF); - protect = LT_PROTECTION; break; case UBI_UNKNOWN: /* @@ -502,7 +435,6 @@ retry: medium_ec = (first->ec + WL_FREE_MAX_DIFF)/2; e = find_wl_entry(&ubi->free, medium_ec); } - protect = U_PROTECTION; break; case UBI_SHORTTERM: /* @@ -510,63 +442,45 @@ retry: * lowest erase counter as we expect it will be erased soon. */ e = rb_entry(rb_first(&ubi->free), struct ubi_wl_entry, u.rb); - protect = ST_PROTECTION; break; default: - protect = 0; - e = NULL; BUG(); } + paranoid_check_in_wl_tree(e, &ubi->free); + /* - * Move the physical eraseblock to the protection trees where it will + * Move the physical eraseblock to the protection queue where it will * be protected from being moved for some time. */ - paranoid_check_in_wl_tree(e, &ubi->free); rb_erase(&e->u.rb, &ubi->free); - prot_tree_add(ubi, e, pe, protect); - - dbg_wl("PEB %d EC %d, protection %d", e->pnum, e->ec, protect); + dbg_wl("PEB %d EC %d", e->pnum, e->ec); + prot_queue_add(ubi, e); spin_unlock(&ubi->wl_lock); - return e->pnum; } /** - * prot_tree_del - remove a physical eraseblock from the protection trees + * prot_queue_del - remove a physical eraseblock from the protection queue. * @ubi: UBI device description object * @pnum: the physical eraseblock to remove * - * This function returns PEB @pnum from the protection trees and returns zero - * in case of success and %-ENODEV if the PEB was not found in the protection - * trees. + * This function deletes PEB @pnum from the protection queue and returns zero + * in case of success and %-ENODEV if the PEB was not found. */ -static int prot_tree_del(struct ubi_device *ubi, int pnum) +static int prot_queue_del(struct ubi_device *ubi, int pnum) { - struct rb_node *p; - struct ubi_wl_prot_entry *pe = NULL; - - p = ubi->prot.pnum.rb_node; - while (p) { - - pe = rb_entry(p, struct ubi_wl_prot_entry, rb_pnum); - - if (pnum == pe->e->pnum) - goto found; + struct ubi_wl_entry *e; - if (pnum < pe->e->pnum) - p = p->rb_left; - else - p = p->rb_right; - } + e = ubi->lookuptbl[pnum]; + if (!e) + return -ENODEV; - return -ENODEV; + if (paranoid_check_in_pq(ubi, e)) + return -ENODEV; -found: - ubi_assert(pe->e->pnum == pnum); - rb_erase(&pe->rb_aec, &ubi->prot.aec); - rb_erase(&pe->rb_pnum, &ubi->prot.pnum); - kfree(pe); + list_del(&e->u.list); + dbg_wl("deleted PEB %d from the protection queue", e->pnum); return 0; } @@ -632,47 +546,47 @@ out_free: } /** - * check_protection_over - check if it is time to stop protecting some PEBs. + * serve_prot_queue - check if it is time to stop protecting PEBs. * @ubi: UBI device description object * - * This function is called after each erase operation, when the absolute erase - * counter is incremented, to check if some physical eraseblock have not to be - * protected any longer. These physical eraseblocks are moved from the - * protection trees to the used tree. + * This function is called after each erase operation and removes PEBs from the + * tail of the protection queue. These PEBs have been protected for long enough + * and should be moved to the used tree. */ -static void check_protection_over(struct ubi_device *ubi) +static void serve_prot_queue(struct ubi_device *ubi) { - struct ubi_wl_prot_entry *pe; + struct ubi_wl_entry *e, *tmp; + int count; /* * There may be several protected physical eraseblock to remove, * process them all. 
*/ - while (1) { - spin_lock(&ubi->wl_lock); - if (!ubi->prot.aec.rb_node) { - spin_unlock(&ubi->wl_lock); - break; - } - - pe = rb_entry(rb_first(&ubi->prot.aec), - struct ubi_wl_prot_entry, rb_aec); +repeat: + count = 0; + spin_lock(&ubi->wl_lock); + list_for_each_entry_safe(e, tmp, &ubi->pq[ubi->pq_head], u.list) { + dbg_wl("PEB %d EC %d protection over, move to used tree", + e->pnum, e->ec); - if (pe->abs_ec > ubi->abs_ec) { + list_del(&e->u.list); + wl_tree_add(e, &ubi->used); + if (count++ > 32) { + /* + * Let's be nice and avoid holding the spinlock for + * too long. + */ spin_unlock(&ubi->wl_lock); - break; + cond_resched(); + goto repeat; } - - dbg_wl("PEB %d protection over, abs_ec %llu, PEB abs_ec %llu", - pe->e->pnum, ubi->abs_ec, pe->abs_ec); - rb_erase(&pe->rb_aec, &ubi->prot.aec); - rb_erase(&pe->rb_pnum, &ubi->prot.pnum); - wl_tree_add(pe->e, &ubi->used); - spin_unlock(&ubi->wl_lock); - - kfree(pe); - cond_resched(); } + + ubi->pq_head += 1; + if (ubi->pq_head == UBI_PROT_QUEUE_LEN) + ubi->pq_head = 0; + ubi_assert(ubi->pq_head >= 0 && ubi->pq_head < UBI_PROT_QUEUE_LEN); + spin_unlock(&ubi->wl_lock); } /** @@ -680,8 +594,8 @@ static void check_protection_over(struct ubi_device *ubi) * @ubi: UBI device description object * @wrk: the work to schedule * - * This function enqueues a work defined by @wrk to the tail of the pending - * works list. + * This function adds a work defined by @wrk to the tail of the pending works + * list. */ static void schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk) { @@ -740,7 +654,6 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, int cancel) { int err, scrubbing = 0, torture = 0; - struct ubi_wl_prot_entry *uninitialized_var(pe); struct ubi_wl_entry *e1, *e2; struct ubi_vid_hdr *vid_hdr; @@ -857,23 +770,17 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, * The LEB has not been moved because the volume is being * deleted or the PEB has been put meanwhile. We should prevent * this PEB from being selected for wear-leveling movement - * again, so put it to the protection tree. + * again, so put it to the protection queue. */ dbg_wl("canceled moving PEB %d", e1->pnum); ubi_assert(err == 1); - pe = kmalloc(sizeof(struct ubi_wl_prot_entry), GFP_NOFS); - if (!pe) { - err = -ENOMEM; - goto out_error; - } - ubi_free_vid_hdr(ubi, vid_hdr); vid_hdr = NULL; spin_lock(&ubi->wl_lock); - prot_tree_add(ubi, e1, pe, U_PROTECTION); + prot_queue_add(ubi, e1); ubi_assert(!ubi->move_to_put); ubi->move_from = ubi->move_to = NULL; ubi->wl_scheduled = 0; @@ -1075,7 +982,6 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk, kfree(wl_wrk); spin_lock(&ubi->wl_lock); - ubi->abs_ec += 1; wl_tree_add(e, &ubi->free); spin_unlock(&ubi->wl_lock); @@ -1083,7 +989,7 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk, * One more erase operation has happened, take care about * protected physical eraseblocks. 
*/ - check_protection_over(ubi); + serve_prot_queue(ubi); /* And take care about wear-leveling */ err = ensure_wear_leveling(ubi); @@ -1220,7 +1126,7 @@ retry: paranoid_check_in_wl_tree(e, &ubi->scrub); rb_erase(&e->u.rb, &ubi->scrub); } else { - err = prot_tree_del(ubi, e->pnum); + err = prot_queue_del(ubi, e->pnum); if (err) { ubi_err("PEB %d not found", pnum); ubi_ro_mode(ubi); @@ -1284,7 +1190,7 @@ retry: } else { int err; - err = prot_tree_del(ubi, e->pnum); + err = prot_queue_del(ubi, e->pnum); if (err) { ubi_err("PEB %d not found", pnum); ubi_ro_mode(ubi); @@ -1315,7 +1221,7 @@ int ubi_wl_flush(struct ubi_device *ubi) int err; /* - * Erase while the pending works queue is not empty, but not more then + * Erase while the pending works queue is not empty, but not more than * the number of currently pending works. */ dbg_wl("flush (%d pending works)", ubi->works_count); @@ -1461,15 +1367,13 @@ static void cancel_pending(struct ubi_device *ubi) */ int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si) { - int err; + int err, i; struct rb_node *rb1, *rb2; struct ubi_scan_volume *sv; struct ubi_scan_leb *seb, *tmp; struct ubi_wl_entry *e; - ubi->used = ubi->free = ubi->scrub = RB_ROOT; - ubi->prot.pnum = ubi->prot.aec = RB_ROOT; spin_lock_init(&ubi->wl_lock); mutex_init(&ubi->move_mutex); init_rwsem(&ubi->work_sem); @@ -1483,6 +1387,10 @@ int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si) if (!ubi->lookuptbl) return err; + for (i = 0; i < UBI_PROT_QUEUE_LEN; i++) + INIT_LIST_HEAD(&ubi->pq[i]); + ubi->pq_head = 0; + list_for_each_entry_safe(seb, tmp, &si->erase, u.list) { cond_resched(); @@ -1577,33 +1485,18 @@ out_free: } /** - * protection_trees_destroy - destroy the protection RB-trees. + * protection_queue_destroy - destroy the protection queue. * @ubi: UBI device description object */ -static void protection_trees_destroy(struct ubi_device *ubi) +static void protection_queue_destroy(struct ubi_device *ubi) { - struct rb_node *rb; - struct ubi_wl_prot_entry *pe; + int i; + struct ubi_wl_entry *e, *tmp; - rb = ubi->prot.aec.rb_node; - while (rb) { - if (rb->rb_left) - rb = rb->rb_left; - else if (rb->rb_right) - rb = rb->rb_right; - else { - pe = rb_entry(rb, struct ubi_wl_prot_entry, rb_aec); - - rb = rb_parent(rb); - if (rb) { - if (rb->rb_left == &pe->rb_aec) - rb->rb_left = NULL; - else - rb->rb_right = NULL; - } - - kmem_cache_free(ubi_wl_entry_slab, pe->e); - kfree(pe); + for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i) { + list_for_each_entry_safe(e, tmp, &ubi->pq[i], u.list) { + list_del(&e->u.list); + kmem_cache_free(ubi_wl_entry_slab, e); } } } @@ -1616,7 +1509,7 @@ void ubi_wl_close(struct ubi_device *ubi) { dbg_wl("close the WL sub-system"); cancel_pending(ubi); - protection_trees_destroy(ubi); + protection_queue_destroy(ubi); tree_destroy(&ubi->used); tree_destroy(&ubi->free); tree_destroy(&ubi->scrub); @@ -1686,4 +1579,27 @@ static int paranoid_check_in_wl_tree(struct ubi_wl_entry *e, return 1; } +/** + * paranoid_check_in_pq - check if wear-leveling entry is in the protection + * queue. + * @ubi: UBI device description object + * @e: the wear-leveling entry to check + * + * This function returns zero if @e is in @ubi->pq and %1 if it is not. 
+ */ +static int paranoid_check_in_pq(struct ubi_device *ubi, struct ubi_wl_entry *e) +{ + struct ubi_wl_entry *p; + int i; + + for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i) + list_for_each_entry(p, &ubi->pq[i], u.list) + if (p == e) + return 0; + + ubi_err("paranoid check failed for PEB %d, EC %d, Protect queue", + e->pnum, e->ec); + ubi_dbg_dump_stack(); + return 1; +} #endif /* CONFIG_MTD_UBI_DEBUG_PARANOID */ -- cgit From cb96cf1ad641334ca605cdf25841ac020d6ae01c Mon Sep 17 00:00:00 2001 From: Sascha Hauer Date: Tue, 11 Nov 2008 15:15:39 +0100 Subject: [ARM] MX3: add NAND support Signed-off-by: Sascha Hauer --- drivers/mtd/nand/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/mtd') diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig index 1c2e9450d66..f8ae0400c49 100644 --- a/drivers/mtd/nand/Kconfig +++ b/drivers/mtd/nand/Kconfig @@ -408,7 +408,7 @@ config MTD_NAND_FSL_UPM config MTD_NAND_MXC tristate "MXC NAND support" - depends on ARCH_MX2 + depends on ARCH_MX2 || ARCH_MX3 help This enables the driver for the NAND flash controller on the MXC processors. -- cgit From f2863c54f30cccb50661697a6e4bdcd0ad0b0a6c Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Sun, 28 Dec 2008 12:20:51 +0200 Subject: UBI: fix checkpatch.pl warnings Just minor indentation and "over 80 characters" fixes. Signed-off-by: Artem Bityutskiy --- drivers/mtd/ubi/cdev.c | 3 ++- drivers/mtd/ubi/debug.h | 10 +++++----- drivers/mtd/ubi/io.c | 2 +- 3 files changed, 8 insertions(+), 7 deletions(-) (limited to 'drivers/mtd') diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c index b30a0b83d7f..98cf31ed081 100644 --- a/drivers/mtd/ubi/cdev.c +++ b/drivers/mtd/ubi/cdev.c @@ -721,7 +721,8 @@ static int rename_volumes(struct ubi_device *ubi, * It seems we need to remove volume with name @re->new_name, * if it exists. */ - desc = ubi_open_volume_nm(ubi->ubi_num, re->new_name, UBI_EXCLUSIVE); + desc = ubi_open_volume_nm(ubi->ubi_num, re->new_name, + UBI_EXCLUSIVE); if (IS_ERR(desc)) { err = PTR_ERR(desc); if (err == -ENODEV) diff --git a/drivers/mtd/ubi/debug.h b/drivers/mtd/ubi/debug.h index 78e914d23ec..13777e5beac 100644 --- a/drivers/mtd/ubi/debug.h +++ b/drivers/mtd/ubi/debug.h @@ -27,11 +27,11 @@ #define dbg_err(fmt, ...) ubi_err(fmt, ##__VA_ARGS__) #define ubi_assert(expr) do { \ - if (unlikely(!(expr))) { \ - printk(KERN_CRIT "UBI assert failed in %s at %u (pid %d)\n", \ - __func__, __LINE__, current->pid); \ - ubi_dbg_dump_stack(); \ - } \ + if (unlikely(!(expr))) { \ + printk(KERN_CRIT "UBI assert failed in %s at %u (pid %d)\n", \ + __func__, __LINE__, current->pid); \ + ubi_dbg_dump_stack(); \ + } \ } while (0) #define dbg_msg(fmt, ...) \ diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c index f60f700256f..a74118c0574 100644 --- a/drivers/mtd/ubi/io.c +++ b/drivers/mtd/ubi/io.c @@ -1034,7 +1034,7 @@ int ubi_io_write_vid_hdr(struct ubi_device *ubi, int pnum, err = paranoid_check_peb_ec_hdr(ubi, pnum); if (err) - return err > 0 ? -EINVAL: err; + return err > 0 ? -EINVAL : err; vid_hdr->magic = cpu_to_be32(UBI_VID_HDR_MAGIC); vid_hdr->version = UBI_VERSION; -- cgit
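Taken together, the two protection-queue patches above replace the
protection RB-trees with a rotating array of list heads. A minimal
standalone model of that rotation (an editorial sketch using plain
singly-linked lists rather than the kernel's 'struct list_head'):

	#define PROT_QUEUE_LEN 10		/* UBI_PROT_QUEUE_LEN above */

	struct peb {
		struct peb *next;
		int pnum;
	};

	/* pq[i] holds the PEBs that expire when the head reaches slot i */
	static struct peb *pq[PROT_QUEUE_LEN];
	static int pq_head;

	/* protect a just-allocated PEB for PROT_QUEUE_LEN erase cycles */
	static void prot_queue_add(struct peb *e)
	{
		int tail = pq_head - 1;

		if (tail < 0)
			tail = PROT_QUEUE_LEN - 1;
		e->next = pq[tail];
		pq[tail] = e;
	}

	/* called once per erase: release the PEBs whose time is up */
	static struct peb *serve_prot_queue(void)
	{
		struct peb *expired = pq[pq_head];

		pq[pq_head] = NULL;
		pq_head = (pq_head + 1) % PROT_QUEUE_LEN;
		return expired;		/* caller moves these to the used tree */
	}

Adding at the slot just behind the head and serving one slot per erase
means an entry survives roughly PROT_QUEUE_LEN (global) erase cycles
before it becomes eligible for wear-leveling again, matching the
description in the wl.c comments above.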