Diffstat (limited to 'drivers/block')
-rw-r--r--  drivers/block/Kconfig               |  11
-rw-r--r--  drivers/block/Makefile              |   1
-rw-r--r--  drivers/block/cpqarray.c            |   4
-rw-r--r--  drivers/block/drbd/drbd_main.c      |   7
-rw-r--r--  drivers/block/drbd/drbd_req.c       |   3
-rw-r--r--  drivers/block/nbd.c                 | 140
-rw-r--r--  drivers/block/nvme-core.c           | 159
-rw-r--r--  drivers/block/nvme-scsi.c           |  28
-rw-r--r--  drivers/block/paride/pg.c           |   4
-rw-r--r--  drivers/block/pmem.c                | 262
-rw-r--r--  drivers/block/rbd.c                 |  26
-rw-r--r--  drivers/block/swim3.c               |  12
-rw-r--r--  drivers/block/virtio_blk.c          |   9
-rw-r--r--  drivers/block/xen-blkback/blkback.c |  62
-rw-r--r--  drivers/block/xen-blkback/common.h  |   6
-rw-r--r--  drivers/block/xen-blkback/xenbus.c  |  43
-rw-r--r--  drivers/block/xen-blkfront.c        |   5
-rw-r--r--  drivers/block/zram/zram_drv.c       |  73
-rw-r--r--  drivers/block/zram/zram_drv.h       |   1
19 files changed, 596 insertions, 260 deletions
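The largest single item above is the new pmem driver. As context for reading its diff below: pmem binds to a platform device named "pmem" whose single IORESOURCE_MEM resource describes a contiguous memory region that was kept out of the kernel's normal memory map at boot (for example via a memmap= reservation). A minimal sketch of registering such a device from a companion module follows; the module name, start address, and size are illustrative assumptions, not part of the patch.

#include <linux/err.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/platform_device.h>

/* Illustrative values for a hypothetical 512 MiB region reserved at 4 GiB;
 * adjust for the memory actually set aside on the target machine. */
#define EXAMPLE_PMEM_START	0x100000000ULL
#define EXAMPLE_PMEM_SIZE	(512ULL << 20)

static struct platform_device *example_pdev;

static int __init pmem_example_init(void)
{
	struct resource res = {
		.start = EXAMPLE_PMEM_START,
		.end   = EXAMPLE_PMEM_START + EXAMPLE_PMEM_SIZE - 1,
		.flags = IORESOURCE_MEM,
	};

	/* pmem_probe() in the patch expects exactly one memory resource;
	 * the registration helper copies the resource array for us. */
	example_pdev = platform_device_register_simple("pmem", -1, &res, 1);
	return PTR_ERR_OR_ZERO(example_pdev);
}
module_init(pmem_example_init);

static void __exit pmem_example_exit(void)
{
	platform_device_unregister(example_pdev);
}
module_exit(pmem_example_exit);

MODULE_LICENSE("GPL v2");

Once the pmem driver binds to this device, the region is exposed as a block device (/dev/pmem0 for the first instance).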
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig index 1b8094d4d7af..eb1fed5bd516 100644 --- a/drivers/block/Kconfig +++ b/drivers/block/Kconfig @@ -404,6 +404,17 @@ config BLK_DEV_RAM_DAX and will prevent RAM block device backing store memory from being allocated from highmem (only a problem for highmem systems). +config BLK_DEV_PMEM + tristate "Persistent memory block device support" + help + Saying Y here will allow you to use a contiguous range of reserved + memory as one or more persistent block devices. + + To compile this driver as a module, choose M here: the module will be + called 'pmem'. + + If unsure, say N. + config CDROM_PKTCDVD tristate "Packet writing on CD/DVD media" depends on !UML diff --git a/drivers/block/Makefile b/drivers/block/Makefile index 02b688d1438d..9cc6c18a1c7e 100644 --- a/drivers/block/Makefile +++ b/drivers/block/Makefile @@ -14,6 +14,7 @@ obj-$(CONFIG_PS3_VRAM) += ps3vram.o obj-$(CONFIG_ATARI_FLOPPY) += ataflop.o obj-$(CONFIG_AMIGA_Z2RAM) += z2ram.o obj-$(CONFIG_BLK_DEV_RAM) += brd.o +obj-$(CONFIG_BLK_DEV_PMEM) += pmem.o obj-$(CONFIG_BLK_DEV_LOOP) += loop.o obj-$(CONFIG_BLK_CPQ_DA) += cpqarray.o obj-$(CONFIG_BLK_CPQ_CISS_DA) += cciss.o diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c index 2b9440384536..f749df9e15cd 100644 --- a/drivers/block/cpqarray.c +++ b/drivers/block/cpqarray.c @@ -405,8 +405,8 @@ static int cpqarray_register_ctlr(int i, struct pci_dev *pdev) goto Enomem4; } hba[i]->access.set_intr_mask(hba[i], 0); - if (request_irq(hba[i]->intr, do_ida_intr, - IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i])) + if (request_irq(hba[i]->intr, do_ida_intr, IRQF_SHARED, + hba[i]->devname, hba[i])) { printk(KERN_ERR "cpqarray: Unable to get irq %d for %s\n", hba[i]->intr, hba[i]->devname); diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index 1fc83427199c..81fde9ef7f8e 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c @@ -2107,13 +2107,12 @@ static int drbd_create_mempools(void) if (drbd_md_io_page_pool == NULL) goto Enomem; - drbd_request_mempool = mempool_create(number, - mempool_alloc_slab, mempool_free_slab, drbd_request_cache); + drbd_request_mempool = mempool_create_slab_pool(number, + drbd_request_cache); if (drbd_request_mempool == NULL) goto Enomem; - drbd_ee_mempool = mempool_create(number, - mempool_alloc_slab, mempool_free_slab, drbd_ee_cache); + drbd_ee_mempool = mempool_create_slab_pool(number, drbd_ee_cache); if (drbd_ee_mempool == NULL) goto Enomem; diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c index 34f2f0ba409b..3907202fb9d9 100644 --- a/drivers/block/drbd/drbd_req.c +++ b/drivers/block/drbd/drbd_req.c @@ -52,9 +52,10 @@ static struct drbd_request *drbd_req_new(struct drbd_device *device, { struct drbd_request *req; - req = mempool_alloc(drbd_request_mempool, GFP_NOIO | __GFP_ZERO); + req = mempool_alloc(drbd_request_mempool, GFP_NOIO); if (!req) return NULL; + memset(req, 0, sizeof(*req)); drbd_req_make_private_bio(req, bio_src); req->rq_state = bio_data_dir(bio_src) == WRITE ? 
RQ_WRITE : 0; diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index a98c41f72c63..39e5f7fae3ef 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -32,28 +32,36 @@ #include <net/sock.h> #include <linux/net.h> #include <linux/kthread.h> +#include <linux/types.h> #include <asm/uaccess.h> #include <asm/types.h> #include <linux/nbd.h> -#define NBD_MAGIC 0x68797548 +struct nbd_device { + int flags; + int harderror; /* Code of hard error */ + struct socket * sock; /* If == NULL, device is not ready, yet */ + int magic; + + spinlock_t queue_lock; + struct list_head queue_head; /* Requests waiting result */ + struct request *active_req; + wait_queue_head_t active_wq; + struct list_head waiting_queue; /* Requests to be sent */ + wait_queue_head_t waiting_wq; + + struct mutex tx_lock; + struct gendisk *disk; + int blksize; + loff_t bytesize; + pid_t pid; /* pid of nbd-client, if attached */ + int xmit_timeout; + int disconnect; /* a disconnect has been requested by user */ +}; -#ifdef NDEBUG -#define dprintk(flags, fmt...) -#else /* NDEBUG */ -#define dprintk(flags, fmt...) do { \ - if (debugflags & (flags)) printk(KERN_DEBUG fmt); \ -} while (0) -#define DBG_IOCTL 0x0004 -#define DBG_INIT 0x0010 -#define DBG_EXIT 0x0020 -#define DBG_BLKDEV 0x0100 -#define DBG_RX 0x0200 -#define DBG_TX 0x0400 -static unsigned int debugflags; -#endif /* NDEBUG */ +#define NBD_MAGIC 0x68797548 static unsigned int nbds_max = 16; static struct nbd_device *nbd_dev; @@ -71,25 +79,9 @@ static int max_part; */ static DEFINE_SPINLOCK(nbd_lock); -#ifndef NDEBUG -static const char *ioctl_cmd_to_ascii(int cmd) +static inline struct device *nbd_to_dev(struct nbd_device *nbd) { - switch (cmd) { - case NBD_SET_SOCK: return "set-sock"; - case NBD_SET_BLKSIZE: return "set-blksize"; - case NBD_SET_SIZE: return "set-size"; - case NBD_SET_TIMEOUT: return "set-timeout"; - case NBD_SET_FLAGS: return "set-flags"; - case NBD_DO_IT: return "do-it"; - case NBD_CLEAR_SOCK: return "clear-sock"; - case NBD_CLEAR_QUE: return "clear-que"; - case NBD_PRINT_DEBUG: return "print-debug"; - case NBD_SET_SIZE_BLOCKS: return "set-size-blocks"; - case NBD_DISCONNECT: return "disconnect"; - case BLKROSET: return "set-read-only"; - case BLKFLSBUF: return "flush-buffer-cache"; - } - return "unknown"; + return disk_to_dev(nbd->disk); } static const char *nbdcmd_to_ascii(int cmd) @@ -103,30 +95,26 @@ static const char *nbdcmd_to_ascii(int cmd) } return "invalid"; } -#endif /* NDEBUG */ -static void nbd_end_request(struct request *req) +static void nbd_end_request(struct nbd_device *nbd, struct request *req) { int error = req->errors ? -EIO : 0; struct request_queue *q = req->q; unsigned long flags; - dprintk(DBG_BLKDEV, "%s: request %p: %s\n", req->rq_disk->disk_name, - req, error ? "failed" : "done"); + dev_dbg(nbd_to_dev(nbd), "request %p: %s\n", req, + error ? 
"failed" : "done"); spin_lock_irqsave(q->queue_lock, flags); __blk_end_request_all(req, error); spin_unlock_irqrestore(q->queue_lock, flags); } +/* + * Forcibly shutdown the socket causing all listeners to error + */ static void sock_shutdown(struct nbd_device *nbd, int lock) { - /* Forcibly shutdown the socket causing all listeners - * to error - * - * FIXME: This code is duplicated from sys_shutdown, but - * there should be a more generic interface rather than - * calling socket ops directly here */ if (lock) mutex_lock(&nbd->tx_lock); if (nbd->sock) { @@ -253,17 +241,15 @@ static int nbd_send_req(struct nbd_device *nbd, struct request *req) } memcpy(request.handle, &req, sizeof(req)); - dprintk(DBG_TX, "%s: request %p: sending control (%s@%llu,%uB)\n", - nbd->disk->disk_name, req, - nbdcmd_to_ascii(nbd_cmd(req)), - (unsigned long long)blk_rq_pos(req) << 9, - blk_rq_bytes(req)); + dev_dbg(nbd_to_dev(nbd), "request %p: sending control (%s@%llu,%uB)\n", + req, nbdcmd_to_ascii(nbd_cmd(req)), + (unsigned long long)blk_rq_pos(req) << 9, blk_rq_bytes(req)); result = sock_xmit(nbd, 1, &request, sizeof(request), (nbd_cmd(req) == NBD_CMD_WRITE) ? MSG_MORE : 0); if (result <= 0) { dev_err(disk_to_dev(nbd->disk), "Send control failed (result %d)\n", result); - goto error_out; + return -EIO; } if (nbd_cmd(req) == NBD_CMD_WRITE) { @@ -277,21 +263,18 @@ static int nbd_send_req(struct nbd_device *nbd, struct request *req) flags = 0; if (!rq_iter_last(bvec, iter)) flags = MSG_MORE; - dprintk(DBG_TX, "%s: request %p: sending %d bytes data\n", - nbd->disk->disk_name, req, bvec.bv_len); + dev_dbg(nbd_to_dev(nbd), "request %p: sending %d bytes data\n", + req, bvec.bv_len); result = sock_send_bvec(nbd, &bvec, flags); if (result <= 0) { dev_err(disk_to_dev(nbd->disk), "Send data failed (result %d)\n", result); - goto error_out; + return -EIO; } } } return 0; - -error_out: - return -EIO; } static struct request *nbd_find_request(struct nbd_device *nbd, @@ -302,7 +285,7 @@ static struct request *nbd_find_request(struct nbd_device *nbd, err = wait_event_interruptible(nbd->active_wq, nbd->active_req != xreq); if (unlikely(err)) - goto out; + return ERR_PTR(err); spin_lock(&nbd->queue_lock); list_for_each_entry_safe(req, tmp, &nbd->queue_head, queuelist) { @@ -314,10 +297,7 @@ static struct request *nbd_find_request(struct nbd_device *nbd, } spin_unlock(&nbd->queue_lock); - err = -ENOENT; - -out: - return ERR_PTR(err); + return ERR_PTR(-ENOENT); } static inline int sock_recv_bvec(struct nbd_device *nbd, struct bio_vec *bvec) @@ -371,8 +351,7 @@ static struct request *nbd_read_stat(struct nbd_device *nbd) return req; } - dprintk(DBG_RX, "%s: request %p: got reply\n", - nbd->disk->disk_name, req); + dev_dbg(nbd_to_dev(nbd), "request %p: got reply\n", req); if (nbd_cmd(req) == NBD_CMD_READ) { struct req_iterator iter; struct bio_vec bvec; @@ -385,8 +364,8 @@ static struct request *nbd_read_stat(struct nbd_device *nbd) req->errors++; return req; } - dprintk(DBG_RX, "%s: request %p: got %d bytes data\n", - nbd->disk->disk_name, req, bvec.bv_len); + dev_dbg(nbd_to_dev(nbd), "request %p: got %d bytes data\n", + req, bvec.bv_len); } } return req; @@ -426,7 +405,7 @@ static int nbd_do_it(struct nbd_device *nbd) } while ((req = nbd_read_stat(nbd)) != NULL) - nbd_end_request(req); + nbd_end_request(nbd, req); device_remove_file(disk_to_dev(nbd->disk), &pid_attr); nbd->pid = 0; @@ -455,7 +434,7 @@ static void nbd_clear_que(struct nbd_device *nbd) queuelist); list_del_init(&req->queuelist); req->errors++; - 
nbd_end_request(req); + nbd_end_request(nbd, req); } while (!list_empty(&nbd->waiting_queue)) { @@ -463,7 +442,7 @@ static void nbd_clear_que(struct nbd_device *nbd) queuelist); list_del_init(&req->queuelist); req->errors++; - nbd_end_request(req); + nbd_end_request(nbd, req); } } @@ -507,7 +486,7 @@ static void nbd_handle_req(struct nbd_device *nbd, struct request *req) if (nbd_send_req(nbd, req) != 0) { dev_err(disk_to_dev(nbd->disk), "Request send failed\n"); req->errors++; - nbd_end_request(req); + nbd_end_request(nbd, req); } else { spin_lock(&nbd->queue_lock); list_add_tail(&req->queuelist, &nbd->queue_head); @@ -522,7 +501,7 @@ static void nbd_handle_req(struct nbd_device *nbd, struct request *req) error_out: req->errors++; - nbd_end_request(req); + nbd_end_request(nbd, req); } static int nbd_thread(void *data) @@ -570,18 +549,18 @@ static void do_nbd_request(struct request_queue *q) spin_unlock_irq(q->queue_lock); - dprintk(DBG_BLKDEV, "%s: request %p: dequeued (flags=%x)\n", - req->rq_disk->disk_name, req, req->cmd_type); - nbd = req->rq_disk->private_data; BUG_ON(nbd->magic != NBD_MAGIC); + dev_dbg(nbd_to_dev(nbd), "request %p: dequeued (flags=%x)\n", + req, req->cmd_type); + if (unlikely(!nbd->sock)) { dev_err(disk_to_dev(nbd->disk), "Attempted send on closed socket\n"); req->errors++; - nbd_end_request(req); + nbd_end_request(nbd, req); spin_lock_irq(q->queue_lock); continue; } @@ -706,13 +685,13 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd, else blk_queue_flush(nbd->disk->queue, 0); - thread = kthread_create(nbd_thread, nbd, "%s", - nbd->disk->disk_name); + thread = kthread_run(nbd_thread, nbd, "%s", + nbd->disk->disk_name); if (IS_ERR(thread)) { mutex_lock(&nbd->tx_lock); return PTR_ERR(thread); } - wake_up_process(thread); + error = nbd_do_it(nbd); kthread_stop(thread); @@ -768,10 +747,6 @@ static int nbd_ioctl(struct block_device *bdev, fmode_t mode, BUG_ON(nbd->magic != NBD_MAGIC); - /* Anyone capable of this syscall can do *real bad* things */ - dprintk(DBG_IOCTL, "%s: nbd_ioctl cmd=%s(0x%x) arg=%lu\n", - nbd->disk->disk_name, ioctl_cmd_to_ascii(cmd), cmd, arg); - mutex_lock(&nbd->tx_lock); error = __nbd_ioctl(bdev, nbd, cmd, arg); mutex_unlock(&nbd->tx_lock); @@ -861,7 +836,6 @@ static int __init nbd_init(void) } printk(KERN_INFO "nbd: registered device at major %d\n", NBD_MAJOR); - dprintk(DBG_INIT, "nbd: debugflags=0x%x\n", debugflags); for (i = 0; i < nbds_max; i++) { struct gendisk *disk = nbd_dev[i].disk; @@ -920,7 +894,3 @@ module_param(nbds_max, int, 0444); MODULE_PARM_DESC(nbds_max, "number of network block devices to initialize (default: 16)"); module_param(max_part, int, 0444); MODULE_PARM_DESC(max_part, "number of partitions per device (default: 0)"); -#ifndef NDEBUG -module_param(debugflags, int, 0644); -MODULE_PARM_DESC(debugflags, "flags for controlling debug output"); -#endif diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c index e23be20a3417..85b8036deaa3 100644 --- a/drivers/block/nvme-core.c +++ b/drivers/block/nvme-core.c @@ -44,7 +44,7 @@ #define NVME_MINORS (1U << MINORBITS) #define NVME_Q_DEPTH 1024 -#define NVME_AQ_DEPTH 64 +#define NVME_AQ_DEPTH 256 #define SQ_SIZE(depth) (depth * sizeof(struct nvme_command)) #define CQ_SIZE(depth) (depth * sizeof(struct nvme_completion)) #define ADMIN_TIMEOUT (admin_timeout * HZ) @@ -152,6 +152,7 @@ struct nvme_cmd_info { */ #define NVME_INT_PAGES 2 #define NVME_INT_BYTES(dev) (NVME_INT_PAGES * (dev)->page_size) +#define NVME_INT_MASK 0x01 /* * Will slightly 
overestimate the number of pages needed. This is OK @@ -257,7 +258,7 @@ static void *iod_get_private(struct nvme_iod *iod) */ static bool iod_should_kfree(struct nvme_iod *iod) { - return (iod->private & 0x01) == 0; + return (iod->private & NVME_INT_MASK) == 0; } /* Special values must be less than 0x1000 */ @@ -301,8 +302,6 @@ static void *cancel_cmd_info(struct nvme_cmd_info *cmd, nvme_completion_fn *fn) static void async_req_completion(struct nvme_queue *nvmeq, void *ctx, struct nvme_completion *cqe) { - struct request *req = ctx; - u32 result = le32_to_cpup(&cqe->result); u16 status = le16_to_cpup(&cqe->status) >> 1; @@ -311,8 +310,6 @@ static void async_req_completion(struct nvme_queue *nvmeq, void *ctx, if (status == NVME_SC_SUCCESS) dev_warn(nvmeq->q_dmadev, "async event result %08x\n", result); - - blk_mq_free_hctx_request(nvmeq->hctx, req); } static void abort_completion(struct nvme_queue *nvmeq, void *ctx, @@ -432,7 +429,6 @@ static struct nvme_iod *nvme_alloc_iod(struct request *rq, struct nvme_dev *dev, { unsigned size = !(rq->cmd_flags & REQ_DISCARD) ? blk_rq_bytes(rq) : sizeof(struct nvme_dsm_range); - unsigned long mask = 0; struct nvme_iod *iod; if (rq->nr_phys_segments <= NVME_INT_PAGES && @@ -440,9 +436,8 @@ static struct nvme_iod *nvme_alloc_iod(struct request *rq, struct nvme_dev *dev, struct nvme_cmd_info *cmd = blk_mq_rq_to_pdu(rq); iod = cmd->iod; - mask = 0x01; iod_init(iod, size, rq->nr_phys_segments, - (unsigned long) rq | 0x01); + (unsigned long) rq | NVME_INT_MASK); return iod; } @@ -522,8 +517,6 @@ static void nvme_dif_remap(struct request *req, return; pmap = kmap_atomic(bip->bip_vec->bv_page) + bip->bip_vec->bv_offset; - if (!pmap) - return; p = pmap; virt = bip_get_seed(bip); @@ -645,12 +638,12 @@ int nvme_setup_prps(struct nvme_dev *dev, struct nvme_iod *iod, int total_len, struct scatterlist *sg = iod->sg; int dma_len = sg_dma_len(sg); u64 dma_addr = sg_dma_address(sg); - int offset = offset_in_page(dma_addr); + u32 page_size = dev->page_size; + int offset = dma_addr & (page_size - 1); __le64 *prp_list; __le64 **list = iod_list(iod); dma_addr_t prp_dma; int nprps, i; - u32 page_size = dev->page_size; length -= (page_size - offset); if (length <= 0) @@ -1028,18 +1021,19 @@ static int nvme_submit_async_admin_req(struct nvme_dev *dev) struct nvme_cmd_info *cmd_info; struct request *req; - req = blk_mq_alloc_request(dev->admin_q, WRITE, GFP_ATOMIC, false); + req = blk_mq_alloc_request(dev->admin_q, WRITE, GFP_ATOMIC, true); if (IS_ERR(req)) return PTR_ERR(req); req->cmd_flags |= REQ_NO_TIMEOUT; cmd_info = blk_mq_rq_to_pdu(req); - nvme_set_info(cmd_info, req, async_req_completion); + nvme_set_info(cmd_info, NULL, async_req_completion); memset(&c, 0, sizeof(c)); c.common.opcode = nvme_admin_async_event; c.common.command_id = req->tag; + blk_mq_free_hctx_request(nvmeq->hctx, req); return __nvme_submit_cmd(nvmeq, &c); } @@ -1347,6 +1341,9 @@ static int nvme_suspend_queue(struct nvme_queue *nvmeq) nvmeq->cq_vector = -1; spin_unlock_irq(&nvmeq->q_lock); + if (!nvmeq->qid && nvmeq->dev->admin_q) + blk_mq_freeze_queue_start(nvmeq->dev->admin_q); + irq_set_affinity_hint(vector, NULL); free_irq(vector, nvmeq); @@ -1378,8 +1375,6 @@ static void nvme_disable_queue(struct nvme_dev *dev, int qid) adapter_delete_sq(dev, qid); adapter_delete_cq(dev, qid); } - if (!qid && dev->admin_q) - blk_mq_freeze_queue_start(dev->admin_q); spin_lock_irq(&nvmeq->q_lock); nvme_process_cq(nvmeq); @@ -1583,6 +1578,7 @@ static int nvme_alloc_admin_tags(struct nvme_dev *dev) 
dev->admin_tagset.ops = &nvme_mq_admin_ops; dev->admin_tagset.nr_hw_queues = 1; dev->admin_tagset.queue_depth = NVME_AQ_DEPTH - 1; + dev->admin_tagset.reserved_tags = 1; dev->admin_tagset.timeout = ADMIN_TIMEOUT; dev->admin_tagset.numa_node = dev_to_node(&dev->pci_dev->dev); dev->admin_tagset.cmd_size = nvme_cmd_size(dev); @@ -1749,25 +1745,31 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio) struct nvme_dev *dev = ns->dev; struct nvme_user_io io; struct nvme_command c; - unsigned length, meta_len; - int status, i; - struct nvme_iod *iod, *meta_iod = NULL; - dma_addr_t meta_dma_addr; - void *meta, *uninitialized_var(meta_mem); + unsigned length, meta_len, prp_len; + int status, write; + struct nvme_iod *iod; + dma_addr_t meta_dma = 0; + void *meta = NULL; if (copy_from_user(&io, uio, sizeof(io))) return -EFAULT; length = (io.nblocks + 1) << ns->lba_shift; meta_len = (io.nblocks + 1) * ns->ms; - if (meta_len && ((io.metadata & 3) || !io.metadata)) + if (meta_len && ((io.metadata & 3) || !io.metadata) && !ns->ext) return -EINVAL; + else if (meta_len && ns->ext) { + length += meta_len; + meta_len = 0; + } + + write = io.opcode & 1; switch (io.opcode) { case nvme_cmd_write: case nvme_cmd_read: case nvme_cmd_compare: - iod = nvme_map_user_pages(dev, io.opcode & 1, io.addr, length); + iod = nvme_map_user_pages(dev, write, io.addr, length); break; default: return -EINVAL; @@ -1776,6 +1778,27 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio) if (IS_ERR(iod)) return PTR_ERR(iod); + prp_len = nvme_setup_prps(dev, iod, length, GFP_KERNEL); + if (length != prp_len) { + status = -ENOMEM; + goto unmap; + } + if (meta_len) { + meta = dma_alloc_coherent(&dev->pci_dev->dev, meta_len, + &meta_dma, GFP_KERNEL); + if (!meta) { + status = -ENOMEM; + goto unmap; + } + if (write) { + if (copy_from_user(meta, (void __user *)io.metadata, + meta_len)) { + status = -EFAULT; + goto unmap; + } + } + } + memset(&c, 0, sizeof(c)); c.rw.opcode = io.opcode; c.rw.flags = io.flags; @@ -1787,75 +1810,21 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio) c.rw.reftag = cpu_to_le32(io.reftag); c.rw.apptag = cpu_to_le16(io.apptag); c.rw.appmask = cpu_to_le16(io.appmask); - - if (meta_len) { - meta_iod = nvme_map_user_pages(dev, io.opcode & 1, io.metadata, - meta_len); - if (IS_ERR(meta_iod)) { - status = PTR_ERR(meta_iod); - meta_iod = NULL; - goto unmap; - } - - meta_mem = dma_alloc_coherent(&dev->pci_dev->dev, meta_len, - &meta_dma_addr, GFP_KERNEL); - if (!meta_mem) { - status = -ENOMEM; - goto unmap; - } - - if (io.opcode & 1) { - int meta_offset = 0; - - for (i = 0; i < meta_iod->nents; i++) { - meta = kmap_atomic(sg_page(&meta_iod->sg[i])) + - meta_iod->sg[i].offset; - memcpy(meta_mem + meta_offset, meta, - meta_iod->sg[i].length); - kunmap_atomic(meta); - meta_offset += meta_iod->sg[i].length; - } - } - - c.rw.metadata = cpu_to_le64(meta_dma_addr); - } - - length = nvme_setup_prps(dev, iod, length, GFP_KERNEL); c.rw.prp1 = cpu_to_le64(sg_dma_address(iod->sg)); c.rw.prp2 = cpu_to_le64(iod->first_dma); - - if (length != (io.nblocks + 1) << ns->lba_shift) - status = -ENOMEM; - else - status = nvme_submit_io_cmd(dev, ns, &c, NULL); - - if (meta_len) { - if (status == NVME_SC_SUCCESS && !(io.opcode & 1)) { - int meta_offset = 0; - - for (i = 0; i < meta_iod->nents; i++) { - meta = kmap_atomic(sg_page(&meta_iod->sg[i])) + - meta_iod->sg[i].offset; - memcpy(meta, meta_mem + meta_offset, - meta_iod->sg[i].length); - kunmap_atomic(meta); 
- meta_offset += meta_iod->sg[i].length; - } - } - - dma_free_coherent(&dev->pci_dev->dev, meta_len, meta_mem, - meta_dma_addr); - } - + c.rw.metadata = cpu_to_le64(meta_dma); + status = nvme_submit_io_cmd(dev, ns, &c, NULL); unmap: - nvme_unmap_user_pages(dev, io.opcode & 1, iod); + nvme_unmap_user_pages(dev, write, iod); nvme_free_iod(dev, iod); - - if (meta_iod) { - nvme_unmap_user_pages(dev, io.opcode & 1, meta_iod); - nvme_free_iod(dev, meta_iod); + if (meta) { + if (status == NVME_SC_SUCCESS && !write) { + if (copy_to_user((void __user *)io.metadata, meta, + meta_len)) + status = -EFAULT; + } + dma_free_coherent(&dev->pci_dev->dev, meta_len, meta, meta_dma); } - return status; } @@ -2018,7 +1987,8 @@ static int nvme_revalidate_disk(struct gendisk *disk) struct nvme_dev *dev = ns->dev; struct nvme_id_ns *id; dma_addr_t dma_addr; - int lbaf, pi_type, old_ms; + u8 lbaf, pi_type; + u16 old_ms; unsigned short bs; id = dma_alloc_coherent(&dev->pci_dev->dev, 4096, &dma_addr, @@ -2039,6 +2009,7 @@ static int nvme_revalidate_disk(struct gendisk *disk) lbaf = id->flbas & NVME_NS_FLBAS_LBA_MASK; ns->lba_shift = id->lbaf[lbaf].ds; ns->ms = le16_to_cpu(id->lbaf[lbaf].ms); + ns->ext = ns->ms && (id->flbas & NVME_NS_FLBAS_META_EXT); /* * If identify namespace failed, use default 512 byte block size so @@ -2055,14 +2026,14 @@ static int nvme_revalidate_disk(struct gendisk *disk) if (blk_get_integrity(disk) && (ns->pi_type != pi_type || ns->ms != old_ms || bs != queue_logical_block_size(disk->queue) || - (ns->ms && id->flbas & NVME_NS_FLBAS_META_EXT))) + (ns->ms && ns->ext))) blk_integrity_unregister(disk); ns->pi_type = pi_type; blk_queue_logical_block_size(ns->queue, bs); if (ns->ms && !blk_get_integrity(disk) && (disk->flags & GENHD_FL_UP) && - !(id->flbas & NVME_NS_FLBAS_META_EXT)) + !ns->ext) nvme_init_integrity(ns); if (id->ncap == 0 || (ns->ms && !blk_get_integrity(disk))) @@ -2334,7 +2305,6 @@ static int nvme_dev_add(struct nvme_dev *dev) dev->oncs = le16_to_cpup(&ctrl->oncs); dev->abort_limit = ctrl->acl + 1; dev->vwc = ctrl->vwc; - dev->event_limit = min(ctrl->aerl + 1, 8); memcpy(dev->serial, ctrl->sn, sizeof(ctrl->sn)); memcpy(dev->model, ctrl->mn, sizeof(ctrl->mn)); memcpy(dev->firmware_rev, ctrl->fr, sizeof(ctrl->fr)); @@ -2881,6 +2851,7 @@ static int nvme_dev_start(struct nvme_dev *dev) nvme_set_irq_hints(dev); + dev->event_limit = 1; return result; free_tags: @@ -3166,8 +3137,10 @@ static int __init nvme_init(void) nvme_char_major = result; nvme_class = class_create(THIS_MODULE, "nvme"); - if (!nvme_class) + if (IS_ERR(nvme_class)) { + result = PTR_ERR(nvme_class); goto unregister_chrdev; + } result = pci_register_driver(&nvme_driver); if (result) diff --git a/drivers/block/nvme-scsi.c b/drivers/block/nvme-scsi.c index e10196e0182d..6b736b00f63e 100644 --- a/drivers/block/nvme-scsi.c +++ b/drivers/block/nvme-scsi.c @@ -55,6 +55,7 @@ static int sg_version_num = 30534; /* 2 digits for each component */ #define VPD_SERIAL_NUMBER 0x80 #define VPD_DEVICE_IDENTIFIERS 0x83 #define VPD_EXTENDED_INQUIRY 0x86 +#define VPD_BLOCK_LIMITS 0xB0 #define VPD_BLOCK_DEV_CHARACTERISTICS 0xB1 /* CDB offsets */ @@ -132,9 +133,10 @@ static int sg_version_num = 30534; /* 2 digits for each component */ #define INQ_UNIT_SERIAL_NUMBER_PAGE 0x80 #define INQ_DEVICE_IDENTIFICATION_PAGE 0x83 #define INQ_EXTENDED_INQUIRY_DATA_PAGE 0x86 +#define INQ_BDEV_LIMITS_PAGE 0xB0 #define INQ_BDEV_CHARACTERISTICS_PAGE 0xB1 #define INQ_SERIAL_NUMBER_LENGTH 0x14 -#define INQ_NUM_SUPPORTED_VPD_PAGES 5 +#define 
INQ_NUM_SUPPORTED_VPD_PAGES 6 #define VERSION_SPC_4 0x06 #define ACA_UNSUPPORTED 0 #define STANDARD_INQUIRY_LENGTH 36 @@ -747,6 +749,7 @@ static int nvme_trans_supported_vpd_pages(struct nvme_ns *ns, inq_response[6] = INQ_DEVICE_IDENTIFICATION_PAGE; inq_response[7] = INQ_EXTENDED_INQUIRY_DATA_PAGE; inq_response[8] = INQ_BDEV_CHARACTERISTICS_PAGE; + inq_response[9] = INQ_BDEV_LIMITS_PAGE; xfer_len = min(alloc_len, STANDARD_INQUIRY_LENGTH); res = nvme_trans_copy_to_user(hdr, inq_response, xfer_len); @@ -938,6 +941,25 @@ static int nvme_trans_ext_inq_page(struct nvme_ns *ns, struct sg_io_hdr *hdr, return res; } +static int nvme_trans_bdev_limits_page(struct nvme_ns *ns, struct sg_io_hdr *hdr, + u8 *inq_response, int alloc_len) +{ + __be32 max_sectors = cpu_to_be32(queue_max_hw_sectors(ns->queue)); + __be32 max_discard = cpu_to_be32(ns->queue->limits.max_discard_sectors); + __be32 discard_desc_count = cpu_to_be32(0x100); + + memset(inq_response, 0, STANDARD_INQUIRY_LENGTH); + inq_response[1] = VPD_BLOCK_LIMITS; + inq_response[3] = 0x3c; /* Page Length */ + memcpy(&inq_response[8], &max_sectors, sizeof(u32)); + memcpy(&inq_response[20], &max_discard, sizeof(u32)); + + if (max_discard) + memcpy(&inq_response[24], &discard_desc_count, sizeof(u32)); + + return nvme_trans_copy_to_user(hdr, inq_response, 0x3c); +} + static int nvme_trans_bdev_char_page(struct nvme_ns *ns, struct sg_io_hdr *hdr, int alloc_len) { @@ -2268,6 +2290,10 @@ static int nvme_trans_inquiry(struct nvme_ns *ns, struct sg_io_hdr *hdr, case VPD_EXTENDED_INQUIRY: res = nvme_trans_ext_inq_page(ns, hdr, alloc_len); break; + case VPD_BLOCK_LIMITS: + res = nvme_trans_bdev_limits_page(ns, hdr, inq_response, + alloc_len); + break; case VPD_BLOCK_DEV_CHARACTERISTICS: res = nvme_trans_bdev_char_page(ns, hdr, alloc_len); break; diff --git a/drivers/block/paride/pg.c b/drivers/block/paride/pg.c index 2ce3dfd7e6b9..876d0c3eaf58 100644 --- a/drivers/block/paride/pg.c +++ b/drivers/block/paride/pg.c @@ -137,7 +137,7 @@ */ -static bool verbose = 0; +static int verbose; static int major = PG_MAJOR; static char *name = PG_NAME; static int disable = 0; @@ -168,7 +168,7 @@ enum {D_PRT, D_PRO, D_UNI, D_MOD, D_SLV, D_DLY}; #include <asm/uaccess.h> -module_param(verbose, bool, 0644); +module_param(verbose, int, 0644); module_param(major, int, 0); module_param(name, charp, 0); module_param_array(drive0, int, NULL, 0); diff --git a/drivers/block/pmem.c b/drivers/block/pmem.c new file mode 100644 index 000000000000..eabf4a8d0085 --- /dev/null +++ b/drivers/block/pmem.c @@ -0,0 +1,262 @@ +/* + * Persistent Memory Driver + * + * Copyright (c) 2014, Intel Corporation. + * Copyright (c) 2015, Christoph Hellwig <hch@lst.de>. + * Copyright (c) 2015, Boaz Harrosh <boaz@plexistor.com>. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ */ + +#include <asm/cacheflush.h> +#include <linux/blkdev.h> +#include <linux/hdreg.h> +#include <linux/init.h> +#include <linux/platform_device.h> +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/slab.h> + +#define PMEM_MINORS 16 + +struct pmem_device { + struct request_queue *pmem_queue; + struct gendisk *pmem_disk; + + /* One contiguous memory region per device */ + phys_addr_t phys_addr; + void *virt_addr; + size_t size; +}; + +static int pmem_major; +static atomic_t pmem_index; + +static void pmem_do_bvec(struct pmem_device *pmem, struct page *page, + unsigned int len, unsigned int off, int rw, + sector_t sector) +{ + void *mem = kmap_atomic(page); + size_t pmem_off = sector << 9; + + if (rw == READ) { + memcpy(mem + off, pmem->virt_addr + pmem_off, len); + flush_dcache_page(page); + } else { + flush_dcache_page(page); + memcpy(pmem->virt_addr + pmem_off, mem + off, len); + } + + kunmap_atomic(mem); +} + +static void pmem_make_request(struct request_queue *q, struct bio *bio) +{ + struct block_device *bdev = bio->bi_bdev; + struct pmem_device *pmem = bdev->bd_disk->private_data; + int rw; + struct bio_vec bvec; + sector_t sector; + struct bvec_iter iter; + int err = 0; + + if (bio_end_sector(bio) > get_capacity(bdev->bd_disk)) { + err = -EIO; + goto out; + } + + BUG_ON(bio->bi_rw & REQ_DISCARD); + + rw = bio_data_dir(bio); + sector = bio->bi_iter.bi_sector; + bio_for_each_segment(bvec, bio, iter) { + pmem_do_bvec(pmem, bvec.bv_page, bvec.bv_len, bvec.bv_offset, + rw, sector); + sector += bvec.bv_len >> 9; + } + +out: + bio_endio(bio, err); +} + +static int pmem_rw_page(struct block_device *bdev, sector_t sector, + struct page *page, int rw) +{ + struct pmem_device *pmem = bdev->bd_disk->private_data; + + pmem_do_bvec(pmem, page, PAGE_CACHE_SIZE, 0, rw, sector); + page_endio(page, rw & WRITE, 0); + + return 0; +} + +static long pmem_direct_access(struct block_device *bdev, sector_t sector, + void **kaddr, unsigned long *pfn, long size) +{ + struct pmem_device *pmem = bdev->bd_disk->private_data; + size_t offset = sector << 9; + + if (!pmem) + return -ENODEV; + + *kaddr = pmem->virt_addr + offset; + *pfn = (pmem->phys_addr + offset) >> PAGE_SHIFT; + + return pmem->size - offset; +} + +static const struct block_device_operations pmem_fops = { + .owner = THIS_MODULE, + .rw_page = pmem_rw_page, + .direct_access = pmem_direct_access, +}; + +static struct pmem_device *pmem_alloc(struct device *dev, struct resource *res) +{ + struct pmem_device *pmem; + struct gendisk *disk; + int idx, err; + + err = -ENOMEM; + pmem = kzalloc(sizeof(*pmem), GFP_KERNEL); + if (!pmem) + goto out; + + pmem->phys_addr = res->start; + pmem->size = resource_size(res); + + err = -EINVAL; + if (!request_mem_region(pmem->phys_addr, pmem->size, "pmem")) { + dev_warn(dev, "could not reserve region [0x%pa:0x%zx]\n", &pmem->phys_addr, pmem->size); + goto out_free_dev; + } + + /* + * Map the memory as non-cachable, as we can't write back the contents + * of the CPU caches in case of a crash. 
+ */ + err = -ENOMEM; + pmem->virt_addr = ioremap_nocache(pmem->phys_addr, pmem->size); + if (!pmem->virt_addr) + goto out_release_region; + + pmem->pmem_queue = blk_alloc_queue(GFP_KERNEL); + if (!pmem->pmem_queue) + goto out_unmap; + + blk_queue_make_request(pmem->pmem_queue, pmem_make_request); + blk_queue_max_hw_sectors(pmem->pmem_queue, 1024); + blk_queue_bounce_limit(pmem->pmem_queue, BLK_BOUNCE_ANY); + + disk = alloc_disk(PMEM_MINORS); + if (!disk) + goto out_free_queue; + + idx = atomic_inc_return(&pmem_index) - 1; + + disk->major = pmem_major; + disk->first_minor = PMEM_MINORS * idx; + disk->fops = &pmem_fops; + disk->private_data = pmem; + disk->queue = pmem->pmem_queue; + disk->flags = GENHD_FL_EXT_DEVT; + sprintf(disk->disk_name, "pmem%d", idx); + disk->driverfs_dev = dev; + set_capacity(disk, pmem->size >> 9); + pmem->pmem_disk = disk; + + add_disk(disk); + + return pmem; + +out_free_queue: + blk_cleanup_queue(pmem->pmem_queue); +out_unmap: + iounmap(pmem->virt_addr); +out_release_region: + release_mem_region(pmem->phys_addr, pmem->size); +out_free_dev: + kfree(pmem); +out: + return ERR_PTR(err); +} + +static void pmem_free(struct pmem_device *pmem) +{ + del_gendisk(pmem->pmem_disk); + put_disk(pmem->pmem_disk); + blk_cleanup_queue(pmem->pmem_queue); + iounmap(pmem->virt_addr); + release_mem_region(pmem->phys_addr, pmem->size); + kfree(pmem); +} + +static int pmem_probe(struct platform_device *pdev) +{ + struct pmem_device *pmem; + struct resource *res; + + if (WARN_ON(pdev->num_resources > 1)) + return -ENXIO; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) + return -ENXIO; + + pmem = pmem_alloc(&pdev->dev, res); + if (IS_ERR(pmem)) + return PTR_ERR(pmem); + + platform_set_drvdata(pdev, pmem); + + return 0; +} + +static int pmem_remove(struct platform_device *pdev) +{ + struct pmem_device *pmem = platform_get_drvdata(pdev); + + pmem_free(pmem); + return 0; +} + +static struct platform_driver pmem_driver = { + .probe = pmem_probe, + .remove = pmem_remove, + .driver = { + .owner = THIS_MODULE, + .name = "pmem", + }, +}; + +static int __init pmem_init(void) +{ + int error; + + pmem_major = register_blkdev(0, "pmem"); + if (pmem_major < 0) + return pmem_major; + + error = platform_driver_register(&pmem_driver); + if (error) + unregister_blkdev(pmem_major, "pmem"); + return error; +} +module_init(pmem_init); + +static void pmem_exit(void) +{ + platform_driver_unregister(&pmem_driver); + unregister_blkdev(pmem_major, "pmem"); +} +module_exit(pmem_exit); + +MODULE_AUTHOR("Ross Zwisler <ross.zwisler@linux.intel.com>"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index b40af3203089..812523330a78 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -3762,8 +3762,8 @@ static int rbd_init_disk(struct rbd_device *rbd_dev) goto out_tag_set; } - /* We use the default size, but let's be explicit about it. 
*/ - blk_queue_physical_block_size(q, SECTOR_SIZE); + queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q); + /* QUEUE_FLAG_ADD_RANDOM is off by default for blk-mq */ /* set io sizes to object size */ segment_size = rbd_obj_bytes(&rbd_dev->header); @@ -5301,8 +5301,13 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping) if (mapping) { ret = rbd_dev_header_watch_sync(rbd_dev); - if (ret) + if (ret) { + if (ret == -ENOENT) + pr_info("image %s/%s does not exist\n", + rbd_dev->spec->pool_name, + rbd_dev->spec->image_name); goto out_header_name; + } } ret = rbd_dev_header_info(rbd_dev); @@ -5319,8 +5324,14 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping) ret = rbd_spec_fill_snap_id(rbd_dev); else ret = rbd_spec_fill_names(rbd_dev); - if (ret) + if (ret) { + if (ret == -ENOENT) + pr_info("snap %s/%s@%s does not exist\n", + rbd_dev->spec->pool_name, + rbd_dev->spec->image_name, + rbd_dev->spec->snap_name); goto err_out_probe; + } if (rbd_dev->header.features & RBD_FEATURE_LAYERING) { ret = rbd_dev_v2_parent_info(rbd_dev); @@ -5390,8 +5401,11 @@ static ssize_t do_rbd_add(struct bus_type *bus, /* pick the pool */ rc = rbd_add_get_pool_id(rbdc, spec->pool_name); - if (rc < 0) + if (rc < 0) { + if (rc == -ENOENT) + pr_info("pool %s does not exist\n", spec->pool_name); goto err_out_client; + } spec->pool_id = (u64)rc; /* The ceph file layout needs to fit pool id in 32 bits */ @@ -5673,7 +5687,7 @@ static int __init rbd_init(void) /* * The number of active work items is limited by the number of - * rbd devices, so leave @max_active at default. + * rbd devices * queue depth, so leave @max_active at default. */ rbd_wq = alloc_workqueue(RBD_DRV_NAME, WQ_MEM_RECLAIM, 0); if (!rbd_wq) { diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c index 523ee8fd4c15..c264f2d284a7 100644 --- a/drivers/block/swim3.c +++ b/drivers/block/swim3.c @@ -440,9 +440,9 @@ static inline void seek_track(struct floppy_state *fs, int n) static inline void init_dma(struct dbdma_cmd *cp, int cmd, void *buf, int count) { - st_le16(&cp->req_count, count); - st_le16(&cp->command, cmd); - st_le32(&cp->phy_addr, virt_to_bus(buf)); + cp->req_count = cpu_to_le16(count); + cp->command = cpu_to_le16(cmd); + cp->phy_addr = cpu_to_le32(virt_to_bus(buf)); cp->xfer_status = 0; } @@ -771,8 +771,8 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id) } /* turn off DMA */ out_le32(&dr->control, (RUN | PAUSE) << 16); - stat = ld_le16(&cp->xfer_status); - resid = ld_le16(&cp->res_count); + stat = le16_to_cpu(cp->xfer_status); + resid = le16_to_cpu(cp->res_count); if (intr & ERROR_INTR) { n = fs->scount - 1 - resid / 512; if (n > 0) { @@ -1170,7 +1170,7 @@ static int swim3_add_device(struct macio_dev *mdev, int index) fs->dma_cmd = (struct dbdma_cmd *) DBDMA_ALIGN(fs->dbdma_cmd_space); memset(fs->dma_cmd, 0, 2 * sizeof(struct dbdma_cmd)); - st_le16(&fs->dma_cmd[1].command, DBDMA_STOP); + fs->dma_cmd[1].command = cpu_to_le16(DBDMA_STOP); if (mdev->media_bay == NULL || check_media_bay(mdev->media_bay) == MB_FD) swim3_mb_event(mdev, MB_FD); diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index 655e570b9b31..5ea2f0bbbc7c 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -342,7 +342,7 @@ static void virtblk_config_changed_work(struct work_struct *work) struct request_queue *q = vblk->disk->queue; char cap_str_2[10], cap_str_10[10]; char *envp[] = { "RESIZE=1", NULL }; - u64 capacity, size; + u64 capacity; /* Host must always specify the capacity. 
*/ virtio_cread(vdev, struct virtio_blk_config, capacity, &capacity); @@ -354,9 +354,10 @@ static void virtblk_config_changed_work(struct work_struct *work) capacity = (sector_t)-1; } - size = capacity * queue_logical_block_size(q); - string_get_size(size, STRING_UNITS_2, cap_str_2, sizeof(cap_str_2)); - string_get_size(size, STRING_UNITS_10, cap_str_10, sizeof(cap_str_10)); + string_get_size(capacity, queue_logical_block_size(q), + STRING_UNITS_2, cap_str_2, sizeof(cap_str_2)); + string_get_size(capacity, queue_logical_block_size(q), + STRING_UNITS_10, cap_str_10, sizeof(cap_str_10)); dev_notice(&vdev->dev, "new size: %llu %d-byte logical blocks (%s/%s)\n", diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c index 2a04d341e598..bd2b3bbbb22c 100644 --- a/drivers/block/xen-blkback/blkback.c +++ b/drivers/block/xen-blkback/blkback.c @@ -34,6 +34,8 @@ * IN THE SOFTWARE. */ +#define pr_fmt(fmt) "xen-blkback: " fmt + #include <linux/spinlock.h> #include <linux/kthread.h> #include <linux/list.h> @@ -211,7 +213,7 @@ static int add_persistent_gnt(struct xen_blkif *blkif, else if (persistent_gnt->gnt > this->gnt) new = &((*new)->rb_right); else { - pr_alert_ratelimited(DRV_PFX " trying to add a gref that's already in the tree\n"); + pr_alert_ratelimited("trying to add a gref that's already in the tree\n"); return -EINVAL; } } @@ -242,7 +244,7 @@ static struct persistent_gnt *get_persistent_gnt(struct xen_blkif *blkif, node = node->rb_right; else { if(test_bit(PERSISTENT_GNT_ACTIVE, data->flags)) { - pr_alert_ratelimited(DRV_PFX " requesting a grant already in use\n"); + pr_alert_ratelimited("requesting a grant already in use\n"); return NULL; } set_bit(PERSISTENT_GNT_ACTIVE, data->flags); @@ -257,7 +259,7 @@ static void put_persistent_gnt(struct xen_blkif *blkif, struct persistent_gnt *persistent_gnt) { if(!test_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags)) - pr_alert_ratelimited(DRV_PFX " freeing a grant already unused"); + pr_alert_ratelimited("freeing a grant already unused\n"); set_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags); clear_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags); atomic_dec(&blkif->persistent_gnt_in_use); @@ -374,7 +376,7 @@ static void purge_persistent_gnt(struct xen_blkif *blkif) } if (work_pending(&blkif->persistent_purge_work)) { - pr_alert_ratelimited(DRV_PFX "Scheduled work from previous purge is still pending, cannot purge list\n"); + pr_alert_ratelimited("Scheduled work from previous purge is still pending, cannot purge list\n"); return; } @@ -396,7 +398,7 @@ static void purge_persistent_gnt(struct xen_blkif *blkif) total = num_clean; - pr_debug(DRV_PFX "Going to purge %u persistent grants\n", num_clean); + pr_debug("Going to purge %u persistent grants\n", num_clean); BUG_ON(!list_empty(&blkif->persistent_purge_list)); root = &blkif->persistent_gnts; @@ -428,13 +430,13 @@ purge_list: * with the requested num */ if (!scan_used && !clean_used) { - pr_debug(DRV_PFX "Still missing %u purged frames\n", num_clean); + pr_debug("Still missing %u purged frames\n", num_clean); scan_used = true; goto purge_list; } finished: if (!clean_used) { - pr_debug(DRV_PFX "Finished scanning for grants to clean, removing used flag\n"); + pr_debug("Finished scanning for grants to clean, removing used flag\n"); clean_used = true; goto purge_list; } @@ -444,7 +446,7 @@ finished: /* We can defer this work */ schedule_work(&blkif->persistent_purge_work); - pr_debug(DRV_PFX "Purged %u/%u\n", (total - num_clean), total); + pr_debug("Purged 
%u/%u\n", (total - num_clean), total); return; } @@ -520,20 +522,20 @@ static void xen_vbd_resize(struct xen_blkif *blkif) struct xenbus_device *dev = xen_blkbk_xenbus(blkif->be); unsigned long long new_size = vbd_sz(vbd); - pr_info(DRV_PFX "VBD Resize: Domid: %d, Device: (%d, %d)\n", + pr_info("VBD Resize: Domid: %d, Device: (%d, %d)\n", blkif->domid, MAJOR(vbd->pdevice), MINOR(vbd->pdevice)); - pr_info(DRV_PFX "VBD Resize: new size %llu\n", new_size); + pr_info("VBD Resize: new size %llu\n", new_size); vbd->size = new_size; again: err = xenbus_transaction_start(&xbt); if (err) { - pr_warn(DRV_PFX "Error starting transaction"); + pr_warn("Error starting transaction\n"); return; } err = xenbus_printf(xbt, dev->nodename, "sectors", "%llu", (unsigned long long)vbd_sz(vbd)); if (err) { - pr_warn(DRV_PFX "Error writing new size"); + pr_warn("Error writing new size\n"); goto abort; } /* @@ -543,7 +545,7 @@ again: */ err = xenbus_printf(xbt, dev->nodename, "state", "%d", dev->state); if (err) { - pr_warn(DRV_PFX "Error writing the state"); + pr_warn("Error writing the state\n"); goto abort; } @@ -551,7 +553,7 @@ again: if (err == -EAGAIN) goto again; if (err) - pr_warn(DRV_PFX "Error ending transaction"); + pr_warn("Error ending transaction\n"); return; abort: xenbus_transaction_end(xbt, 1); @@ -578,7 +580,7 @@ irqreturn_t xen_blkif_be_int(int irq, void *dev_id) static void print_stats(struct xen_blkif *blkif) { - pr_info("xen-blkback (%s): oo %3llu | rd %4llu | wr %4llu | f %4llu" + pr_info("(%s): oo %3llu | rd %4llu | wr %4llu | f %4llu" " | ds %4llu | pg: %4u/%4d\n", current->comm, blkif->st_oo_req, blkif->st_rd_req, blkif->st_wr_req, @@ -855,7 +857,7 @@ again: /* This is a newly mapped grant */ BUG_ON(new_map_idx >= segs_to_map); if (unlikely(map[new_map_idx].status != 0)) { - pr_debug(DRV_PFX "invalid buffer -- could not remap it\n"); + pr_debug("invalid buffer -- could not remap it\n"); put_free_pages(blkif, &pages[seg_idx]->page, 1); pages[seg_idx]->handle = BLKBACK_INVALID_HANDLE; ret |= 1; @@ -891,14 +893,14 @@ again: goto next; } pages[seg_idx]->persistent_gnt = persistent_gnt; - pr_debug(DRV_PFX " grant %u added to the tree of persistent grants, using %u/%u\n", + pr_debug("grant %u added to the tree of persistent grants, using %u/%u\n", persistent_gnt->gnt, blkif->persistent_gnt_c, xen_blkif_max_pgrants); goto next; } if (use_persistent_gnts && !blkif->vbd.overflow_max_grants) { blkif->vbd.overflow_max_grants = 1; - pr_debug(DRV_PFX " domain %u, device %#x is using maximum number of persistent grants\n", + pr_debug("domain %u, device %#x is using maximum number of persistent grants\n", blkif->domid, blkif->vbd.handle); } /* @@ -916,7 +918,7 @@ next: return ret; out_of_memory: - pr_alert(DRV_PFX "%s: out of memory\n", __func__); + pr_alert("%s: out of memory\n", __func__); put_free_pages(blkif, pages_to_gnt, segs_to_map); return -ENOMEM; } @@ -996,7 +998,7 @@ static int dispatch_discard_io(struct xen_blkif *blkif, err = xen_vbd_translate(&preq, blkif, WRITE); if (err) { - pr_warn(DRV_PFX "access denied: DISCARD [%llu->%llu] on dev=%04x\n", + pr_warn("access denied: DISCARD [%llu->%llu] on dev=%04x\n", preq.sector_number, preq.sector_number + preq.nr_sects, blkif->vbd.pdevice); goto fail_response; @@ -1012,7 +1014,7 @@ static int dispatch_discard_io(struct xen_blkif *blkif, GFP_KERNEL, secure); fail_response: if (err == -EOPNOTSUPP) { - pr_debug(DRV_PFX "discard op failed, not supported\n"); + pr_debug("discard op failed, not supported\n"); status = BLKIF_RSP_EOPNOTSUPP; } else if (err) 
status = BLKIF_RSP_ERROR; @@ -1056,16 +1058,16 @@ static void __end_block_io_op(struct pending_req *pending_req, int error) /* An error fails the entire request. */ if ((pending_req->operation == BLKIF_OP_FLUSH_DISKCACHE) && (error == -EOPNOTSUPP)) { - pr_debug(DRV_PFX "flush diskcache op failed, not supported\n"); + pr_debug("flush diskcache op failed, not supported\n"); xen_blkbk_flush_diskcache(XBT_NIL, pending_req->blkif->be, 0); pending_req->status = BLKIF_RSP_EOPNOTSUPP; } else if ((pending_req->operation == BLKIF_OP_WRITE_BARRIER) && (error == -EOPNOTSUPP)) { - pr_debug(DRV_PFX "write barrier op failed, not supported\n"); + pr_debug("write barrier op failed, not supported\n"); xen_blkbk_barrier(XBT_NIL, pending_req->blkif->be, 0); pending_req->status = BLKIF_RSP_EOPNOTSUPP; } else if (error) { - pr_debug(DRV_PFX "Buffer not up-to-date at end of operation," + pr_debug("Buffer not up-to-date at end of operation," " error=%d\n", error); pending_req->status = BLKIF_RSP_ERROR; } @@ -1110,7 +1112,7 @@ __do_block_io_op(struct xen_blkif *blkif) if (RING_REQUEST_PROD_OVERFLOW(&blk_rings->common, rp)) { rc = blk_rings->common.rsp_prod_pvt; - pr_warn(DRV_PFX "Frontend provided bogus ring requests (%d - %d = %d). Halting ring processing on dev=%04x\n", + pr_warn("Frontend provided bogus ring requests (%d - %d = %d). Halting ring processing on dev=%04x\n", rp, rc, rp - rc, blkif->vbd.pdevice); return -EACCES; } @@ -1217,8 +1219,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif, if ((req->operation == BLKIF_OP_INDIRECT) && (req_operation != BLKIF_OP_READ) && (req_operation != BLKIF_OP_WRITE)) { - pr_debug(DRV_PFX "Invalid indirect operation (%u)\n", - req_operation); + pr_debug("Invalid indirect operation (%u)\n", req_operation); goto fail_response; } @@ -1252,8 +1253,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif, (nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) || unlikely((req->operation == BLKIF_OP_INDIRECT) && (nseg > MAX_INDIRECT_SEGMENTS))) { - pr_debug(DRV_PFX "Bad number of segments in request (%d)\n", - nseg); + pr_debug("Bad number of segments in request (%d)\n", nseg); /* Haven't submitted any bio's yet. */ goto fail_response; } @@ -1288,7 +1288,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif, } if (xen_vbd_translate(&preq, blkif, operation) != 0) { - pr_debug(DRV_PFX "access denied: %s of [%llu,%llu] on dev=%04x\n", + pr_debug("access denied: %s of [%llu,%llu] on dev=%04x\n", operation == READ ? "read" : "write", preq.sector_number, preq.sector_number + preq.nr_sects, @@ -1303,7 +1303,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif, for (i = 0; i < nseg; i++) { if (((int)preq.sector_number|(int)seg[i].nsec) & ((bdev_logical_block_size(preq.bdev) >> 9) - 1)) { - pr_debug(DRV_PFX "Misaligned I/O request from domain %d", + pr_debug("Misaligned I/O request from domain %d\n", blkif->domid); goto fail_response; } diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h index 375d28851860..f620b5d3f77c 100644 --- a/drivers/block/xen-blkback/common.h +++ b/drivers/block/xen-blkback/common.h @@ -44,12 +44,6 @@ #include <xen/interface/io/blkif.h> #include <xen/interface/io/protocols.h> -#define DRV_PFX "xen-blkback:" -#define DPRINTK(fmt, args...) \ - pr_debug(DRV_PFX "(%s:%d) " fmt ".\n", \ - __func__, __LINE__, ##args) - - /* * This is the maximum number of segments that would be allowed in indirect * requests. This value will also be passed to the frontend. 
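Both xen-blkback diffs above, and the xenbus.c diff below, replace the hand-rolled DRV_PFX/DPRINTK macros with the kernel's pr_fmt() convention: defining pr_fmt before any include makes every pr_*() call in that file pick up the prefix automatically. A minimal sketch of the pattern, with an illustrative module name:

/* pr_fmt() must be defined before the first include, because printk.h
 * only supplies its empty default when the macro is not already set. */
#define pr_fmt(fmt) "demo: " fmt

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>

static int __init demo_init(void)
{
	pr_info("loaded\n");		/* prints "demo: loaded" */
	return 0;
}
module_init(demo_init);

static void __exit demo_exit(void)
{
	pr_info("unloaded\n");		/* same automatic prefix */
}
module_exit(demo_exit);

MODULE_LICENSE("GPL");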
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c index e3afe97280b1..6ab69ad61ee1 100644 --- a/drivers/block/xen-blkback/xenbus.c +++ b/drivers/block/xen-blkback/xenbus.c @@ -14,6 +14,8 @@ */ +#define pr_fmt(fmt) "xen-blkback: " fmt + #include <stdarg.h> #include <linux/module.h> #include <linux/kthread.h> @@ -21,6 +23,9 @@ #include <xen/grant_table.h> #include "common.h" +/* Enlarge the array size in order to fully show blkback name. */ +#define BLKBACK_NAME_LEN (20) + struct backend_info { struct xenbus_device *dev; struct xen_blkif *blkif; @@ -70,7 +75,7 @@ static int blkback_name(struct xen_blkif *blkif, char *buf) else devname = devpath; - snprintf(buf, TASK_COMM_LEN, "blkback.%d.%s", blkif->domid, devname); + snprintf(buf, BLKBACK_NAME_LEN, "blkback.%d.%s", blkif->domid, devname); kfree(devpath); return 0; @@ -79,7 +84,7 @@ static int blkback_name(struct xen_blkif *blkif, char *buf) static void xen_update_blkif_status(struct xen_blkif *blkif) { int err; - char name[TASK_COMM_LEN]; + char name[BLKBACK_NAME_LEN]; /* Not ready to connect? */ if (!blkif->irq || !blkif->vbd.bdev) @@ -193,7 +198,7 @@ fail: return ERR_PTR(-ENOMEM); } -static int xen_blkif_map(struct xen_blkif *blkif, unsigned long shared_page, +static int xen_blkif_map(struct xen_blkif *blkif, grant_ref_t gref, unsigned int evtchn) { int err; @@ -202,7 +207,8 @@ static int xen_blkif_map(struct xen_blkif *blkif, unsigned long shared_page, if (blkif->irq) return 0; - err = xenbus_map_ring_valloc(blkif->be->dev, shared_page, &blkif->blk_ring); + err = xenbus_map_ring_valloc(blkif->be->dev, &gref, 1, + &blkif->blk_ring); if (err < 0) return err; @@ -423,14 +429,14 @@ static int xen_vbd_create(struct xen_blkif *blkif, blkif_vdev_t handle, FMODE_READ : FMODE_WRITE, NULL); if (IS_ERR(bdev)) { - DPRINTK("xen_vbd_create: device %08x could not be opened.\n", + pr_warn("xen_vbd_create: device %08x could not be opened\n", vbd->pdevice); return -ENOENT; } vbd->bdev = bdev; if (vbd->bdev->bd_disk == NULL) { - DPRINTK("xen_vbd_create: device %08x doesn't exist.\n", + pr_warn("xen_vbd_create: device %08x doesn't exist\n", vbd->pdevice); xen_vbd_free(vbd); return -ENOENT; @@ -449,7 +455,7 @@ static int xen_vbd_create(struct xen_blkif *blkif, blkif_vdev_t handle, if (q && blk_queue_secdiscard(q)) vbd->discard_secure = true; - DPRINTK("Successful creation of handle=%04x (dom=%u)\n", + pr_debug("Successful creation of handle=%04x (dom=%u)\n", handle, blkif->domid); return 0; } @@ -457,7 +463,7 @@ static int xen_blkbk_remove(struct xenbus_device *dev) { struct backend_info *be = dev_get_drvdata(&dev->dev); - DPRINTK(""); + pr_debug("%s %p %d\n", __func__, dev, dev->otherend_id); if (be->major || be->minor) xenvbd_sysfs_delif(dev); @@ -563,6 +569,10 @@ static int xen_blkbk_probe(struct xenbus_device *dev, int err; struct backend_info *be = kzalloc(sizeof(struct backend_info), GFP_KERNEL); + + /* match the pr_debug in xen_blkbk_remove */ + pr_debug("%s %p %d\n", __func__, dev, dev->otherend_id); + if (!be) { xenbus_dev_fatal(dev, -ENOMEM, "allocating backend structure"); @@ -594,7 +604,7 @@ static int xen_blkbk_probe(struct xenbus_device *dev, return 0; fail: - DPRINTK("failed"); + pr_warn("%s failed\n", __func__); xen_blkbk_remove(dev); return err; } @@ -618,7 +628,7 @@ static void backend_changed(struct xenbus_watch *watch, unsigned long handle; char *device_type; - DPRINTK(""); + pr_debug("%s %p %d\n", __func__, dev, dev->otherend_id); err = xenbus_scanf(XBT_NIL, dev->nodename, "physical-device", "%x:%x", 
&major, &minor); @@ -637,7 +647,7 @@ static void backend_changed(struct xenbus_watch *watch, if (be->major | be->minor) { if (be->major != major || be->minor != minor) - pr_warn(DRV_PFX "changing physical device (from %x:%x to %x:%x) not supported.\n", + pr_warn("changing physical device (from %x:%x to %x:%x) not supported.\n", be->major, be->minor, major, minor); return; } @@ -698,13 +708,12 @@ static void frontend_changed(struct xenbus_device *dev, struct backend_info *be = dev_get_drvdata(&dev->dev); int err; - DPRINTK("%s", xenbus_strstate(frontend_state)); + pr_debug("%s %p %s\n", __func__, dev, xenbus_strstate(frontend_state)); switch (frontend_state) { case XenbusStateInitialising: if (dev->state == XenbusStateClosed) { - pr_info(DRV_PFX "%s: prepare for reconnect\n", - dev->nodename); + pr_info("%s: prepare for reconnect\n", dev->nodename); xenbus_switch_state(dev, XenbusStateInitWait); } break; @@ -771,7 +780,7 @@ static void connect(struct backend_info *be) int err; struct xenbus_device *dev = be->dev; - DPRINTK("%s", dev->otherend); + pr_debug("%s %s\n", __func__, dev->otherend); /* Supply the information about the device the frontend needs */ again: @@ -857,7 +866,7 @@ static int connect_ring(struct backend_info *be) char protocol[64] = ""; int err; - DPRINTK("%s", dev->otherend); + pr_debug("%s %s\n", __func__, dev->otherend); err = xenbus_gather(XBT_NIL, dev->otherend, "ring-ref", "%lu", &ring_ref, "event-channel", "%u", &evtchn, NULL); @@ -892,7 +901,7 @@ static int connect_ring(struct backend_info *be) be->blkif->vbd.feature_gnt_persistent = pers_grants; be->blkif->vbd.overflow_max_grants = 0; - pr_info(DRV_PFX "ring-ref %ld, event-channel %d, protocol %d (%s) %s\n", + pr_info("ring-ref %ld, event-channel %d, protocol %d (%s) %s\n", ring_ref, evtchn, be->blkif->blk_protocol, protocol, pers_grants ? "persistent grants" : ""); diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index 37779e4c4585..2c61cf8c6f61 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c @@ -1245,6 +1245,7 @@ static int setup_blkring(struct xenbus_device *dev, struct blkfront_info *info) { struct blkif_sring *sring; + grant_ref_t gref; int err; info->ring_ref = GRANT_INVALID_REF; @@ -1257,13 +1258,13 @@ static int setup_blkring(struct xenbus_device *dev, SHARED_RING_INIT(sring); FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE); - err = xenbus_grant_ring(dev, virt_to_mfn(info->ring.sring)); + err = xenbus_grant_ring(dev, info->ring.sring, 1, &gref); if (err < 0) { free_page((unsigned long)sring); info->ring.sring = NULL; goto fail; } - info->ring_ref = err; + info->ring_ref = gref; err = xenbus_alloc_evtchn(dev, &info->evtchn); if (err) diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index 871bd3550cb0..c94386aa563d 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c @@ -43,11 +43,22 @@ static const char *default_compressor = "lzo"; /* Module params (documentation at end) */ static unsigned int num_devices = 1; +static inline void deprecated_attr_warn(const char *name) +{ + pr_warn_once("%d (%s) Attribute %s (and others) will be removed. 
%s\n", + task_pid_nr(current), + current->comm, + name, + "See zram documentation."); +} + #define ZRAM_ATTR_RO(name) \ static ssize_t name##_show(struct device *d, \ struct device_attribute *attr, char *b) \ { \ struct zram *zram = dev_to_zram(d); \ + \ + deprecated_attr_warn(__stringify(name)); \ return scnprintf(b, PAGE_SIZE, "%llu\n", \ (u64)atomic64_read(&zram->stats.name)); \ } \ @@ -89,6 +100,7 @@ static ssize_t orig_data_size_show(struct device *dev, { struct zram *zram = dev_to_zram(dev); + deprecated_attr_warn("orig_data_size"); return scnprintf(buf, PAGE_SIZE, "%llu\n", (u64)(atomic64_read(&zram->stats.pages_stored)) << PAGE_SHIFT); } @@ -99,6 +111,7 @@ static ssize_t mem_used_total_show(struct device *dev, u64 val = 0; struct zram *zram = dev_to_zram(dev); + deprecated_attr_warn("mem_used_total"); down_read(&zram->init_lock); if (init_done(zram)) { struct zram_meta *meta = zram->meta; @@ -128,6 +141,7 @@ static ssize_t mem_limit_show(struct device *dev, u64 val; struct zram *zram = dev_to_zram(dev); + deprecated_attr_warn("mem_limit"); down_read(&zram->init_lock); val = zram->limit_pages; up_read(&zram->init_lock); @@ -159,6 +173,7 @@ static ssize_t mem_used_max_show(struct device *dev, u64 val = 0; struct zram *zram = dev_to_zram(dev); + deprecated_attr_warn("mem_used_max"); down_read(&zram->init_lock); if (init_done(zram)) val = atomic_long_read(&zram->stats.max_used_pages); @@ -670,8 +685,12 @@ out: static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index, int offset, int rw) { + unsigned long start_time = jiffies; int ret; + generic_start_io_acct(rw, bvec->bv_len >> SECTOR_SHIFT, + &zram->disk->part0); + if (rw == READ) { atomic64_inc(&zram->stats.num_reads); ret = zram_bvec_read(zram, bvec, index, offset); @@ -680,6 +699,8 @@ static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index, ret = zram_bvec_write(zram, bvec, index, offset); } + generic_end_io_acct(rw, &zram->disk->part0, start_time); + if (unlikely(ret)) { if (rw == READ) atomic64_inc(&zram->stats.failed_reads); @@ -1027,6 +1048,55 @@ static DEVICE_ATTR_RW(mem_used_max); static DEVICE_ATTR_RW(max_comp_streams); static DEVICE_ATTR_RW(comp_algorithm); +static ssize_t io_stat_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct zram *zram = dev_to_zram(dev); + ssize_t ret; + + down_read(&zram->init_lock); + ret = scnprintf(buf, PAGE_SIZE, + "%8llu %8llu %8llu %8llu\n", + (u64)atomic64_read(&zram->stats.failed_reads), + (u64)atomic64_read(&zram->stats.failed_writes), + (u64)atomic64_read(&zram->stats.invalid_io), + (u64)atomic64_read(&zram->stats.notify_free)); + up_read(&zram->init_lock); + + return ret; +} + +static ssize_t mm_stat_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct zram *zram = dev_to_zram(dev); + u64 orig_size, mem_used = 0; + long max_used; + ssize_t ret; + + down_read(&zram->init_lock); + if (init_done(zram)) + mem_used = zs_get_total_pages(zram->meta->mem_pool); + + orig_size = atomic64_read(&zram->stats.pages_stored); + max_used = atomic_long_read(&zram->stats.max_used_pages); + + ret = scnprintf(buf, PAGE_SIZE, + "%8llu %8llu %8llu %8lu %8ld %8llu %8llu\n", + orig_size << PAGE_SHIFT, + (u64)atomic64_read(&zram->stats.compr_data_size), + mem_used << PAGE_SHIFT, + zram->limit_pages << PAGE_SHIFT, + max_used << PAGE_SHIFT, + (u64)atomic64_read(&zram->stats.zero_pages), + (u64)atomic64_read(&zram->stats.num_migrated)); + up_read(&zram->init_lock); + + return ret; +} + +static DEVICE_ATTR_RO(io_stat); 
+static DEVICE_ATTR_RO(mm_stat); ZRAM_ATTR_RO(num_reads); ZRAM_ATTR_RO(num_writes); ZRAM_ATTR_RO(failed_reads); @@ -1054,6 +1124,8 @@ static struct attribute *zram_disk_attrs[] = { &dev_attr_mem_used_max.attr, &dev_attr_max_comp_streams.attr, &dev_attr_comp_algorithm.attr, + &dev_attr_io_stat.attr, + &dev_attr_mm_stat.attr, NULL, }; @@ -1082,6 +1154,7 @@ static int create_device(struct zram *zram, int device_id) if (!zram->disk) { pr_warn("Error allocating disk structure for device %d\n", device_id); + ret = -ENOMEM; goto out_free_queue; } diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h index 17056e589146..570c598f4ce9 100644 --- a/drivers/block/zram/zram_drv.h +++ b/drivers/block/zram/zram_drv.h @@ -84,6 +84,7 @@ struct zram_stats { atomic64_t compr_data_size; /* compressed size of pages stored */ atomic64_t num_reads; /* failed + successful */ atomic64_t num_writes; /* --do-- */ + atomic64_t num_migrated; /* no. of migrated object */ atomic64_t failed_reads; /* can happen when memory is too low */ atomic64_t failed_writes; /* can happen when memory is too low */ atomic64_t invalid_io; /* non-page-aligned I/O requests */ |
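The zram part of the patch deprecates the per-attribute statistics files in favour of two consolidated sysfs nodes, io_stat and mm_stat, whose column layout is fixed by io_stat_show() and mm_stat_show() above. A small userspace sketch that parses mm_stat, assuming a zram0 device is present:

#include <stdio.h>

int main(void)
{
	/* Columns, in the order printed by mm_stat_show(): orig_data_size,
	 * compr_data_size, mem_used_total, mem_limit, mem_used_max (all in
	 * bytes), then zero_pages and num_migrated (plain counts). */
	unsigned long long orig, compr, used, zero_pages, migrated;
	unsigned long limit;
	long max_used;
	FILE *f = fopen("/sys/block/zram0/mm_stat", "r");

	if (!f) {
		perror("mm_stat");
		return 1;
	}
	if (fscanf(f, "%llu %llu %llu %lu %ld %llu %llu",
		   &orig, &compr, &used, &limit, &max_used,
		   &zero_pages, &migrated) == 7)
		printf("orig=%llu compressed=%llu total_used=%llu bytes\n",
		       orig, compr, used);
	fclose(f);
	return 0;
}

The deprecated per-attribute files keep working, but reading them now triggers the pr_warn_once() notice added by deprecated_attr_warn() above.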