| field | value | date |
|---|---|---|
| author | Andreas Gruenbacher <agruen@linbit.com> | 2011-07-03 13:26:43 +0200 |
| committer | Philipp Reisner <philipp.reisner@linbit.com> | 2014-02-17 16:42:24 +0100 |
| commit | b30ab7913b0a7b1d3b1091c8cb3abb1a9f1e0824 (patch) | |
| tree | 308a13ddc14374d86a2874d740aa5bc17a64b54f /drivers/block/drbd/drbd_req.c | |
| parent | 547616979372b65646d691e8dab90e850be582fe (diff) | |
| download | linux-b30ab7913b0a7b1d3b1091c8cb3abb1a9f1e0824.tar.gz linux-b30ab7913b0a7b1d3b1091c8cb3abb1a9f1e0824.tar.xz | |
drbd: Rename "mdev" to "device"
sed -i -e 's:mdev:device:g'
Signed-off-by: Andreas Gruenbacher <agruen@linbit.com>
Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com>
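The rename was performed mechanically with the sed expression quoted above. As a rough sketch, assuming the expression was applied file by file (the commit message does not show the file arguments), the invocation for this file plus a quick leftover check could look like:

```sh
# Hypothetical reconstruction: apply the rename to this file only.
# The commit message records just the sed expression, not its targets.
sed -i -e 's:mdev:device:g' drivers/block/drbd/drbd_req.c

# Verify that no "mdev" occurrences remain after the substitution.
grep -n 'mdev' drivers/block/drbd/drbd_req.c && echo "leftover mdev found"
```

Because the substitution is purely textual, it also relies on `mdev` never appearing as a substring of an unrelated identifier that should stay unchanged in these files.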
Diffstat (limited to 'drivers/block/drbd/drbd_req.c')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | drivers/block/drbd/drbd_req.c | 348 |
1 file changed, 174 insertions, 174 deletions
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c index 601304ce17c3..ab6abcbea6ab 100644 --- a/drivers/block/drbd/drbd_req.c +++ b/drivers/block/drbd/drbd_req.c @@ -31,37 +31,37 @@ #include "drbd_req.h" -static bool drbd_may_do_local_read(struct drbd_device *mdev, sector_t sector, int size); +static bool drbd_may_do_local_read(struct drbd_device *device, sector_t sector, int size); /* Update disk stats at start of I/O request */ -static void _drbd_start_io_acct(struct drbd_device *mdev, struct drbd_request *req) +static void _drbd_start_io_acct(struct drbd_device *device, struct drbd_request *req) { const int rw = bio_data_dir(req->master_bio); int cpu; cpu = part_stat_lock(); - part_round_stats(cpu, &mdev->vdisk->part0); - part_stat_inc(cpu, &mdev->vdisk->part0, ios[rw]); - part_stat_add(cpu, &mdev->vdisk->part0, sectors[rw], req->i.size >> 9); + part_round_stats(cpu, &device->vdisk->part0); + part_stat_inc(cpu, &device->vdisk->part0, ios[rw]); + part_stat_add(cpu, &device->vdisk->part0, sectors[rw], req->i.size >> 9); (void) cpu; /* The macro invocations above want the cpu argument, I do not like the compiler warning about cpu only assigned but never used... */ - part_inc_in_flight(&mdev->vdisk->part0, rw); + part_inc_in_flight(&device->vdisk->part0, rw); part_stat_unlock(); } /* Update disk stats when completing request upwards */ -static void _drbd_end_io_acct(struct drbd_device *mdev, struct drbd_request *req) +static void _drbd_end_io_acct(struct drbd_device *device, struct drbd_request *req) { int rw = bio_data_dir(req->master_bio); unsigned long duration = jiffies - req->start_time; int cpu; cpu = part_stat_lock(); - part_stat_add(cpu, &mdev->vdisk->part0, ticks[rw], duration); - part_round_stats(cpu, &mdev->vdisk->part0); - part_dec_in_flight(&mdev->vdisk->part0, rw); + part_stat_add(cpu, &device->vdisk->part0, ticks[rw], duration); + part_round_stats(cpu, &device->vdisk->part0); + part_dec_in_flight(&device->vdisk->part0, rw); part_stat_unlock(); } -static struct drbd_request *drbd_req_new(struct drbd_device *mdev, +static struct drbd_request *drbd_req_new(struct drbd_device *device, struct bio *bio_src) { struct drbd_request *req; @@ -72,7 +72,7 @@ static struct drbd_request *drbd_req_new(struct drbd_device *mdev, drbd_req_make_private_bio(req, bio_src); req->rq_state = bio_data_dir(bio_src) == WRITE ? RQ_WRITE : 0; - req->w.mdev = mdev; + req->w.device = device; req->master_bio = bio_src; req->epoch = 0; @@ -95,7 +95,7 @@ static struct drbd_request *drbd_req_new(struct drbd_device *mdev, void drbd_req_destroy(struct kref *kref) { struct drbd_request *req = container_of(kref, struct drbd_request, kref); - struct drbd_device *mdev = req->w.mdev; + struct drbd_device *device = req->w.device; const unsigned s = req->rq_state; if ((req->master_bio && !(s & RQ_POSTPONED)) || @@ -132,10 +132,10 @@ void drbd_req_destroy(struct kref *kref) */ if ((s & (RQ_POSTPONED|RQ_LOCAL_MASK|RQ_NET_MASK)) != RQ_POSTPONED) { if (!(s & RQ_NET_OK) || !(s & RQ_LOCAL_OK)) - drbd_set_out_of_sync(mdev, req->i.sector, req->i.size); + drbd_set_out_of_sync(device, req->i.sector, req->i.size); if ((s & RQ_NET_OK) && (s & RQ_LOCAL_OK) && (s & RQ_NET_SIS)) - drbd_set_in_sync(mdev, req->i.sector, req->i.size); + drbd_set_in_sync(device, req->i.sector, req->i.size); } /* one might be tempted to move the drbd_al_complete_io @@ -149,9 +149,9 @@ void drbd_req_destroy(struct kref *kref) * we would forget to resync the corresponding extent. 
*/ if (s & RQ_IN_ACT_LOG) { - if (get_ldev_if_state(mdev, D_FAILED)) { - drbd_al_complete_io(mdev, &req->i); - put_ldev(mdev); + if (get_ldev_if_state(device, D_FAILED)) { + drbd_al_complete_io(device, &req->i); + put_ldev(device); } else if (__ratelimit(&drbd_ratelimit_state)) { dev_warn(DEV, "Should have called drbd_al_complete_io(, %llu, %u), " "but my Disk seems to have failed :(\n", @@ -179,25 +179,25 @@ void start_new_tl_epoch(struct drbd_tconn *tconn) wake_all_senders(tconn); } -void complete_master_bio(struct drbd_device *mdev, +void complete_master_bio(struct drbd_device *device, struct bio_and_error *m) { bio_endio(m->bio, m->error); - dec_ap_bio(mdev); + dec_ap_bio(device); } static void drbd_remove_request_interval(struct rb_root *root, struct drbd_request *req) { - struct drbd_device *mdev = req->w.mdev; + struct drbd_device *device = req->w.device; struct drbd_interval *i = &req->i; drbd_remove_interval(root, i); /* Wake up any processes waiting for this request to complete. */ if (i->waiting) - wake_up(&mdev->misc_wait); + wake_up(&device->misc_wait); } /* Helper for __req_mod(). @@ -210,7 +210,7 @@ static void drbd_req_complete(struct drbd_request *req, struct bio_and_error *m) { const unsigned s = req->rq_state; - struct drbd_device *mdev = req->w.mdev; + struct drbd_device *device = req->w.device; int rw; int error, ok; @@ -259,9 +259,9 @@ void drbd_req_complete(struct drbd_request *req, struct bio_and_error *m) struct rb_root *root; if (rw == WRITE) - root = &mdev->write_requests; + root = &device->write_requests; else - root = &mdev->read_requests; + root = &device->read_requests; drbd_remove_request_interval(root, req); } @@ -273,11 +273,11 @@ void drbd_req_complete(struct drbd_request *req, struct bio_and_error *m) * and reset the transfer log epoch write_cnt. 
*/ if (rw == WRITE && - req->epoch == atomic_read(&mdev->tconn->current_tle_nr)) - start_new_tl_epoch(mdev->tconn); + req->epoch == atomic_read(&device->tconn->current_tle_nr)) + start_new_tl_epoch(device->tconn); /* Update disk stats */ - _drbd_end_io_acct(mdev, req); + _drbd_end_io_acct(device, req); /* If READ failed, * have it be pushed back to the retry work queue, @@ -305,7 +305,7 @@ void drbd_req_complete(struct drbd_request *req, struct bio_and_error *m) static int drbd_req_put_completion_ref(struct drbd_request *req, struct bio_and_error *m, int put) { - struct drbd_device *mdev = req->w.mdev; + struct drbd_device *device = req->w.device; D_ASSERT(m || (req->rq_state & RQ_POSTPONED)); if (!atomic_sub_and_test(put, &req->completion_ref)) @@ -328,12 +328,12 @@ static int drbd_req_put_completion_ref(struct drbd_request *req, struct bio_and_ static void mod_rq_state(struct drbd_request *req, struct bio_and_error *m, int clear, int set) { - struct drbd_device *mdev = req->w.mdev; + struct drbd_device *device = req->w.device; unsigned s = req->rq_state; int c_put = 0; int k_put = 0; - if (drbd_suspended(mdev) && !((s | clear) & RQ_COMPLETION_SUSP)) + if (drbd_suspended(device) && !((s | clear) & RQ_COMPLETION_SUSP)) set |= RQ_COMPLETION_SUSP; /* apply */ @@ -351,7 +351,7 @@ static void mod_rq_state(struct drbd_request *req, struct bio_and_error *m, atomic_inc(&req->completion_ref); if (!(s & RQ_NET_PENDING) && (set & RQ_NET_PENDING)) { - inc_ap_pending(mdev); + inc_ap_pending(device); atomic_inc(&req->completion_ref); } @@ -362,7 +362,7 @@ static void mod_rq_state(struct drbd_request *req, struct bio_and_error *m, kref_get(&req->kref); /* wait for the DONE */ if (!(s & RQ_NET_SENT) && (set & RQ_NET_SENT)) - atomic_add(req->i.size >> 9, &mdev->ap_in_flight); + atomic_add(req->i.size >> 9, &device->ap_in_flight); if (!(s & RQ_COMPLETION_SUSP) && (set & RQ_COMPLETION_SUSP)) atomic_inc(&req->completion_ref); @@ -388,7 +388,7 @@ static void mod_rq_state(struct drbd_request *req, struct bio_and_error *m, } if ((s & RQ_NET_PENDING) && (clear & RQ_NET_PENDING)) { - dec_ap_pending(mdev); + dec_ap_pending(device); ++c_put; } @@ -397,7 +397,7 @@ static void mod_rq_state(struct drbd_request *req, struct bio_and_error *m, if ((s & RQ_EXP_BARR_ACK) && !(s & RQ_NET_DONE) && (set & RQ_NET_DONE)) { if (req->rq_state & RQ_NET_SENT) - atomic_sub(req->i.size >> 9, &mdev->ap_in_flight); + atomic_sub(req->i.size >> 9, &device->ap_in_flight); ++k_put; } @@ -416,7 +416,7 @@ static void mod_rq_state(struct drbd_request *req, struct bio_and_error *m, /* If we made progress, retry conflicting peer requests, if any. */ if (req->i.waiting) - wake_up(&mdev->misc_wait); + wake_up(&device->misc_wait); if (c_put) k_put += drbd_req_put_completion_ref(req, m, c_put); @@ -424,7 +424,7 @@ static void mod_rq_state(struct drbd_request *req, struct bio_and_error *m, kref_sub(&req->kref, k_put, drbd_req_destroy); } -static void drbd_report_io_error(struct drbd_device *mdev, struct drbd_request *req) +static void drbd_report_io_error(struct drbd_device *device, struct drbd_request *req) { char b[BDEVNAME_SIZE]; @@ -435,7 +435,7 @@ static void drbd_report_io_error(struct drbd_device *mdev, struct drbd_request * (req->rq_state & RQ_WRITE) ? 
"WRITE" : "READ", (unsigned long long)req->i.sector, req->i.size >> 9, - bdevname(mdev->ldev->backing_bdev, b)); + bdevname(device->ldev->backing_bdev, b)); } /* obviously this could be coded as many single functions @@ -453,7 +453,7 @@ static void drbd_report_io_error(struct drbd_device *mdev, struct drbd_request * int __req_mod(struct drbd_request *req, enum drbd_req_event what, struct bio_and_error *m) { - struct drbd_device *mdev = req->w.mdev; + struct drbd_device *device = req->w.device; struct net_conf *nc; int p, rv = 0; @@ -476,7 +476,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what, * and from w_read_retry_remote */ D_ASSERT(!(req->rq_state & RQ_NET_MASK)); rcu_read_lock(); - nc = rcu_dereference(mdev->tconn->net_conf); + nc = rcu_dereference(device->tconn->net_conf); p = nc->wire_protocol; rcu_read_unlock(); req->rq_state |= @@ -493,9 +493,9 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what, case COMPLETED_OK: if (req->rq_state & RQ_WRITE) - mdev->writ_cnt += req->i.size >> 9; + device->writ_cnt += req->i.size >> 9; else - mdev->read_cnt += req->i.size >> 9; + device->read_cnt += req->i.size >> 9; mod_rq_state(req, m, RQ_LOCAL_PENDING, RQ_LOCAL_COMPLETED|RQ_LOCAL_OK); @@ -506,15 +506,15 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what, break; case WRITE_COMPLETED_WITH_ERROR: - drbd_report_io_error(mdev, req); - __drbd_chk_io_error(mdev, DRBD_WRITE_ERROR); + drbd_report_io_error(device, req); + __drbd_chk_io_error(device, DRBD_WRITE_ERROR); mod_rq_state(req, m, RQ_LOCAL_PENDING, RQ_LOCAL_COMPLETED); break; case READ_COMPLETED_WITH_ERROR: - drbd_set_out_of_sync(mdev, req->i.sector, req->i.size); - drbd_report_io_error(mdev, req); - __drbd_chk_io_error(mdev, DRBD_READ_ERROR); + drbd_set_out_of_sync(device, req->i.sector, req->i.size); + drbd_report_io_error(device, req); + __drbd_chk_io_error(device, DRBD_READ_ERROR); /* fall through. */ case READ_AHEAD_COMPLETED_WITH_ERROR: /* it is legal to fail READA, no __drbd_chk_io_error in that case. */ @@ -533,15 +533,15 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what, * Corresponding drbd_remove_request_interval is in * drbd_req_complete() */ D_ASSERT(drbd_interval_empty(&req->i)); - drbd_insert_interval(&mdev->read_requests, &req->i); + drbd_insert_interval(&device->read_requests, &req->i); - set_bit(UNPLUG_REMOTE, &mdev->flags); + set_bit(UNPLUG_REMOTE, &device->flags); D_ASSERT(req->rq_state & RQ_NET_PENDING); D_ASSERT((req->rq_state & RQ_LOCAL_MASK) == 0); mod_rq_state(req, m, 0, RQ_NET_QUEUED); req->w.cb = w_send_read_req; - drbd_queue_work(&mdev->tconn->sender_work, &req->w); + drbd_queue_work(&device->tconn->sender_work, &req->w); break; case QUEUE_FOR_NET_WRITE: @@ -551,7 +551,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what, /* Corresponding drbd_remove_request_interval is in * drbd_req_complete() */ D_ASSERT(drbd_interval_empty(&req->i)); - drbd_insert_interval(&mdev->write_requests, &req->i); + drbd_insert_interval(&device->write_requests, &req->i); /* NOTE * In case the req ended up on the transfer log before being @@ -570,28 +570,28 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what, /* otherwise we may lose an unplug, which may cause some remote * io-scheduler timeout to expire, increasing maximum latency, * hurting performance. 
*/ - set_bit(UNPLUG_REMOTE, &mdev->flags); + set_bit(UNPLUG_REMOTE, &device->flags); /* queue work item to send data */ D_ASSERT(req->rq_state & RQ_NET_PENDING); mod_rq_state(req, m, 0, RQ_NET_QUEUED|RQ_EXP_BARR_ACK); req->w.cb = w_send_dblock; - drbd_queue_work(&mdev->tconn->sender_work, &req->w); + drbd_queue_work(&device->tconn->sender_work, &req->w); /* close the epoch, in case it outgrew the limit */ rcu_read_lock(); - nc = rcu_dereference(mdev->tconn->net_conf); + nc = rcu_dereference(device->tconn->net_conf); p = nc->max_epoch_size; rcu_read_unlock(); - if (mdev->tconn->current_tle_writes >= p) - start_new_tl_epoch(mdev->tconn); + if (device->tconn->current_tle_writes >= p) + start_new_tl_epoch(device->tconn); break; case QUEUE_FOR_SEND_OOS: mod_rq_state(req, m, 0, RQ_NET_QUEUED); req->w.cb = w_send_out_of_sync; - drbd_queue_work(&mdev->tconn->sender_work, &req->w); + drbd_queue_work(&device->tconn->sender_work, &req->w); break; case READ_RETRY_REMOTE_CANCELED: @@ -673,7 +673,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what, D_ASSERT(req->rq_state & RQ_NET_PENDING); req->rq_state |= RQ_POSTPONED; if (req->i.waiting) - wake_up(&mdev->misc_wait); + wake_up(&device->misc_wait); /* Do not clear RQ_NET_PENDING. This request will make further * progress via restart_conflicting_writes() or * fail_postponed_requests(). Hopefully. */ @@ -701,9 +701,9 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what, if (bio_data_dir(req->master_bio) == WRITE) rv = MR_WRITE; - get_ldev(mdev); /* always succeeds in this call path */ + get_ldev(device); /* always succeeds in this call path */ req->w.cb = w_restart_disk_io; - drbd_queue_work(&mdev->tconn->sender_work, &req->w); + drbd_queue_work(&device->tconn->sender_work, &req->w); break; case RESEND: @@ -724,7 +724,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what, mod_rq_state(req, m, RQ_COMPLETION_SUSP, RQ_NET_QUEUED|RQ_NET_PENDING); if (req->w.cb) { - drbd_queue_work(&mdev->tconn->sender_work, &req->w); + drbd_queue_work(&device->tconn->sender_work, &req->w); rv = req->rq_state & RQ_WRITE ? MR_WRITE : MR_READ; } /* else: FIXME can this happen? */ break; @@ -756,7 +756,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what, break; case QUEUE_AS_DRBD_BARRIER: - start_new_tl_epoch(mdev->tconn); + start_new_tl_epoch(device->tconn); mod_rq_state(req, m, 0, RQ_NET_OK|RQ_NET_DONE); break; }; @@ -771,27 +771,27 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what, * since size may be bigger than BM_BLOCK_SIZE, * we may need to check several bits. 
*/ -static bool drbd_may_do_local_read(struct drbd_device *mdev, sector_t sector, int size) +static bool drbd_may_do_local_read(struct drbd_device *device, sector_t sector, int size) { unsigned long sbnr, ebnr; sector_t esector, nr_sectors; - if (mdev->state.disk == D_UP_TO_DATE) + if (device->state.disk == D_UP_TO_DATE) return true; - if (mdev->state.disk != D_INCONSISTENT) + if (device->state.disk != D_INCONSISTENT) return false; esector = sector + (size >> 9) - 1; - nr_sectors = drbd_get_capacity(mdev->this_bdev); + nr_sectors = drbd_get_capacity(device->this_bdev); D_ASSERT(sector < nr_sectors); D_ASSERT(esector < nr_sectors); sbnr = BM_SECT_TO_BIT(sector); ebnr = BM_SECT_TO_BIT(esector); - return drbd_bm_count_bits(mdev, sbnr, ebnr) == 0; + return drbd_bm_count_bits(device, sbnr, ebnr) == 0; } -static bool remote_due_to_read_balancing(struct drbd_device *mdev, sector_t sector, +static bool remote_due_to_read_balancing(struct drbd_device *device, sector_t sector, enum drbd_read_balancing rbm) { struct backing_dev_info *bdi; @@ -799,11 +799,11 @@ static bool remote_due_to_read_balancing(struct drbd_device *mdev, sector_t sect switch (rbm) { case RB_CONGESTED_REMOTE: - bdi = &mdev->ldev->backing_bdev->bd_disk->queue->backing_dev_info; + bdi = &device->ldev->backing_bdev->bd_disk->queue->backing_dev_info; return bdi_read_congested(bdi); case RB_LEAST_PENDING: - return atomic_read(&mdev->local_cnt) > - atomic_read(&mdev->ap_pending_cnt) + atomic_read(&mdev->rs_pending_cnt); + return atomic_read(&device->local_cnt) > + atomic_read(&device->ap_pending_cnt) + atomic_read(&device->rs_pending_cnt); case RB_32K_STRIPING: /* stripe_shift = 15 */ case RB_64K_STRIPING: case RB_128K_STRIPING: @@ -813,7 +813,7 @@ static bool remote_due_to_read_balancing(struct drbd_device *mdev, sector_t sect stripe_shift = (rbm - RB_32K_STRIPING + 15); return (sector >> (stripe_shift - 9)) & 1; case RB_ROUND_ROBIN: - return test_and_change_bit(READ_BALANCE_RR, &mdev->flags); + return test_and_change_bit(READ_BALANCE_RR, &device->flags); case RB_PREFER_REMOTE: return true; case RB_PREFER_LOCAL: @@ -834,33 +834,33 @@ static bool remote_due_to_read_balancing(struct drbd_device *mdev, sector_t sect static void complete_conflicting_writes(struct drbd_request *req) { DEFINE_WAIT(wait); - struct drbd_device *mdev = req->w.mdev; + struct drbd_device *device = req->w.device; struct drbd_interval *i; sector_t sector = req->i.sector; int size = req->i.size; - i = drbd_find_overlap(&mdev->write_requests, sector, size); + i = drbd_find_overlap(&device->write_requests, sector, size); if (!i) return; for (;;) { - prepare_to_wait(&mdev->misc_wait, &wait, TASK_UNINTERRUPTIBLE); - i = drbd_find_overlap(&mdev->write_requests, sector, size); + prepare_to_wait(&device->misc_wait, &wait, TASK_UNINTERRUPTIBLE); + i = drbd_find_overlap(&device->write_requests, sector, size); if (!i) break; /* Indicate to wake up device->misc_wait on progress. 
*/ i->waiting = true; - spin_unlock_irq(&mdev->tconn->req_lock); + spin_unlock_irq(&device->tconn->req_lock); schedule(); - spin_lock_irq(&mdev->tconn->req_lock); + spin_lock_irq(&device->tconn->req_lock); } - finish_wait(&mdev->misc_wait, &wait); + finish_wait(&device->misc_wait, &wait); } /* called within req_lock and rcu_read_lock() */ -static void maybe_pull_ahead(struct drbd_device *mdev) +static void maybe_pull_ahead(struct drbd_device *device) { - struct drbd_tconn *tconn = mdev->tconn; + struct drbd_tconn *tconn = device->tconn; struct net_conf *nc; bool congested = false; enum drbd_on_congestion on_congestion; @@ -875,32 +875,32 @@ static void maybe_pull_ahead(struct drbd_device *mdev) /* If I don't even have good local storage, we can not reasonably try * to pull ahead of the peer. We also need the local reference to make - * sure mdev->act_log is there. + * sure device->act_log is there. */ - if (!get_ldev_if_state(mdev, D_UP_TO_DATE)) + if (!get_ldev_if_state(device, D_UP_TO_DATE)) return; if (nc->cong_fill && - atomic_read(&mdev->ap_in_flight) >= nc->cong_fill) { + atomic_read(&device->ap_in_flight) >= nc->cong_fill) { dev_info(DEV, "Congestion-fill threshold reached\n"); congested = true; } - if (mdev->act_log->used >= nc->cong_extents) { + if (device->act_log->used >= nc->cong_extents) { dev_info(DEV, "Congestion-extents threshold reached\n"); congested = true; } if (congested) { /* start a new epoch for non-mirrored writes */ - start_new_tl_epoch(mdev->tconn); + start_new_tl_epoch(device->tconn); if (on_congestion == OC_PULL_AHEAD) - _drbd_set_state(_NS(mdev, conn, C_AHEAD), 0, NULL); + _drbd_set_state(_NS(device, conn, C_AHEAD), 0, NULL); else /*nc->on_congestion == OC_DISCONNECT */ - _drbd_set_state(_NS(mdev, conn, C_DISCONNECTING), 0, NULL); + _drbd_set_state(_NS(device, conn, C_DISCONNECTING), 0, NULL); } - put_ldev(mdev); + put_ldev(device); } /* If this returns false, and req->private_bio is still set, @@ -914,19 +914,19 @@ static void maybe_pull_ahead(struct drbd_device *mdev) */ static bool do_remote_read(struct drbd_request *req) { - struct drbd_device *mdev = req->w.mdev; + struct drbd_device *device = req->w.device; enum drbd_read_balancing rbm; if (req->private_bio) { - if (!drbd_may_do_local_read(mdev, + if (!drbd_may_do_local_read(device, req->i.sector, req->i.size)) { bio_put(req->private_bio); req->private_bio = NULL; - put_ldev(mdev); + put_ldev(device); } } - if (mdev->state.pdsk != D_UP_TO_DATE) + if (device->state.pdsk != D_UP_TO_DATE) return false; if (req->private_bio == NULL) @@ -936,17 +936,17 @@ static bool do_remote_read(struct drbd_request *req) * protocol, pending requests etc. */ rcu_read_lock(); - rbm = rcu_dereference(mdev->ldev->disk_conf)->read_balancing; + rbm = rcu_dereference(device->ldev->disk_conf)->read_balancing; rcu_read_unlock(); if (rbm == RB_PREFER_LOCAL && req->private_bio) return false; /* submit locally */ - if (remote_due_to_read_balancing(mdev, req->i.sector, rbm)) { + if (remote_due_to_read_balancing(device, req->i.sector, rbm)) { if (req->private_bio) { bio_put(req->private_bio); req->private_bio = NULL; - put_ldev(mdev); + put_ldev(device); } return true; } @@ -959,11 +959,11 @@ static bool do_remote_read(struct drbd_request *req) * which does NOT include those that we are L_AHEAD for. 
*/ static int drbd_process_write_request(struct drbd_request *req) { - struct drbd_device *mdev = req->w.mdev; + struct drbd_device *device = req->w.device; int remote, send_oos; - remote = drbd_should_do_remote(mdev->state); - send_oos = drbd_should_send_out_of_sync(mdev->state); + remote = drbd_should_do_remote(device->state); + send_oos = drbd_should_send_out_of_sync(device->state); /* Need to replicate writes. Unless it is an empty flush, * which is better mapped to a DRBD P_BARRIER packet, @@ -987,7 +987,7 @@ static int drbd_process_write_request(struct drbd_request *req) if (remote) { _req_mod(req, TO_BE_SENT); _req_mod(req, QUEUE_FOR_NET_WRITE); - } else if (drbd_set_out_of_sync(mdev, req->i.sector, req->i.size)) + } else if (drbd_set_out_of_sync(device, req->i.sector, req->i.size)) _req_mod(req, QUEUE_FOR_SEND_OOS); return remote; @@ -996,36 +996,36 @@ static int drbd_process_write_request(struct drbd_request *req) static void drbd_submit_req_private_bio(struct drbd_request *req) { - struct drbd_device *mdev = req->w.mdev; + struct drbd_device *device = req->w.device; struct bio *bio = req->private_bio; const int rw = bio_rw(bio); - bio->bi_bdev = mdev->ldev->backing_bdev; + bio->bi_bdev = device->ldev->backing_bdev; /* State may have changed since we grabbed our reference on the * ->ldev member. Double check, and short-circuit to endio. * In case the last activity log transaction failed to get on * stable storage, and this is a WRITE, we may not even submit * this bio. */ - if (get_ldev(mdev)) { - if (drbd_insert_fault(mdev, + if (get_ldev(device)) { + if (drbd_insert_fault(device, rw == WRITE ? DRBD_FAULT_DT_WR : rw == READ ? DRBD_FAULT_DT_RD : DRBD_FAULT_DT_RA)) bio_endio(bio, -EIO); else generic_make_request(bio); - put_ldev(mdev); + put_ldev(device); } else bio_endio(bio, -EIO); } -static void drbd_queue_write(struct drbd_device *mdev, struct drbd_request *req) +static void drbd_queue_write(struct drbd_device *device, struct drbd_request *req) { - spin_lock(&mdev->submit.lock); - list_add_tail(&req->tl_requests, &mdev->submit.writes); - spin_unlock(&mdev->submit.lock); - queue_work(mdev->submit.wq, &mdev->submit.worker); + spin_lock(&device->submit.lock); + list_add_tail(&req->tl_requests, &device->submit.writes); + spin_unlock(&device->submit.lock); + queue_work(device->submit.wq, &device->submit.worker); } /* returns the new drbd_request pointer, if the caller is expected to @@ -1034,15 +1034,15 @@ static void drbd_queue_write(struct drbd_device *mdev, struct drbd_request *req) * Returns ERR_PTR(-ENOMEM) if we cannot allocate a drbd_request. */ static struct drbd_request * -drbd_request_prepare(struct drbd_device *mdev, struct bio *bio, unsigned long start_time) +drbd_request_prepare(struct drbd_device *device, struct bio *bio, unsigned long start_time) { const int rw = bio_data_dir(bio); struct drbd_request *req; /* allocate outside of all locks; */ - req = drbd_req_new(mdev, bio); + req = drbd_req_new(device, bio); if (!req) { - dec_ap_bio(mdev); + dec_ap_bio(device); /* only pass the error to the upper layers. * if user cannot handle io errors, that's not our business. 
*/ dev_err(DEV, "could not kmalloc() req\n"); @@ -1051,18 +1051,18 @@ drbd_request_prepare(struct drbd_device *mdev, struct bio *bio, unsigned long st } req->start_time = start_time; - if (!get_ldev(mdev)) { + if (!get_ldev(device)) { bio_put(req->private_bio); req->private_bio = NULL; } /* Update disk stats */ - _drbd_start_io_acct(mdev, req); + _drbd_start_io_acct(device, req); if (rw == WRITE && req->private_bio && req->i.size - && !test_bit(AL_SUSPENDED, &mdev->flags)) { - if (!drbd_al_begin_io_fastpath(mdev, &req->i)) { - drbd_queue_write(mdev, req); + && !test_bit(AL_SUSPENDED, &device->flags)) { + if (!drbd_al_begin_io_fastpath(device, &req->i)) { + drbd_queue_write(device, req); return NULL; } req->rq_state |= RQ_IN_ACT_LOG; @@ -1071,13 +1071,13 @@ drbd_request_prepare(struct drbd_device *mdev, struct bio *bio, unsigned long st return req; } -static void drbd_send_and_submit(struct drbd_device *mdev, struct drbd_request *req) +static void drbd_send_and_submit(struct drbd_device *device, struct drbd_request *req) { const int rw = bio_rw(req->master_bio); struct bio_and_error m = { NULL, }; bool no_remote = false; - spin_lock_irq(&mdev->tconn->req_lock); + spin_lock_irq(&device->tconn->req_lock); if (rw == WRITE) { /* This may temporarily give up the req_lock, * but will re-aquire it before it returns here. @@ -1087,17 +1087,17 @@ static void drbd_send_and_submit(struct drbd_device *mdev, struct drbd_request * /* check for congestion, and potentially stop sending * full data updates, but start sending "dirty bits" only. */ - maybe_pull_ahead(mdev); + maybe_pull_ahead(device); } - if (drbd_suspended(mdev)) { + if (drbd_suspended(device)) { /* push back and retry: */ req->rq_state |= RQ_POSTPONED; if (req->private_bio) { bio_put(req->private_bio); req->private_bio = NULL; - put_ldev(mdev); + put_ldev(device); } goto out; } @@ -1111,15 +1111,15 @@ static void drbd_send_and_submit(struct drbd_device *mdev, struct drbd_request * } /* which transfer log epoch does this belong to? */ - req->epoch = atomic_read(&mdev->tconn->current_tle_nr); + req->epoch = atomic_read(&device->tconn->current_tle_nr); /* no point in adding empty flushes to the transfer log, * they are mapped to drbd barriers already. 
*/ if (likely(req->i.size!=0)) { if (rw == WRITE) - mdev->tconn->current_tle_writes++; + device->tconn->current_tle_writes++; - list_add_tail(&req->tl_requests, &mdev->tconn->transfer_log); + list_add_tail(&req->tl_requests, &device->tconn->transfer_log); } if (rw == WRITE) { @@ -1139,9 +1139,9 @@ static void drbd_send_and_submit(struct drbd_device *mdev, struct drbd_request * /* needs to be marked within the same spinlock */ _req_mod(req, TO_BE_SUBMITTED); /* but we need to give up the spinlock to submit */ - spin_unlock_irq(&mdev->tconn->req_lock); + spin_unlock_irq(&device->tconn->req_lock); drbd_submit_req_private_bio(req); - spin_lock_irq(&mdev->tconn->req_lock); + spin_lock_irq(&device->tconn->req_lock); } else if (no_remote) { nodata: if (__ratelimit(&drbd_ratelimit_state)) @@ -1154,21 +1154,21 @@ nodata: out: if (drbd_req_put_completion_ref(req, &m, 1)) kref_put(&req->kref, drbd_req_destroy); - spin_unlock_irq(&mdev->tconn->req_lock); + spin_unlock_irq(&device->tconn->req_lock); if (m.bio) - complete_master_bio(mdev, &m); + complete_master_bio(device, &m); } -void __drbd_make_request(struct drbd_device *mdev, struct bio *bio, unsigned long start_time) +void __drbd_make_request(struct drbd_device *device, struct bio *bio, unsigned long start_time) { - struct drbd_request *req = drbd_request_prepare(mdev, bio, start_time); + struct drbd_request *req = drbd_request_prepare(device, bio, start_time); if (IS_ERR_OR_NULL(req)) return; - drbd_send_and_submit(mdev, req); + drbd_send_and_submit(device, req); } -static void submit_fast_path(struct drbd_device *mdev, struct list_head *incoming) +static void submit_fast_path(struct drbd_device *device, struct list_head *incoming) { struct drbd_request *req, *tmp; list_for_each_entry_safe(req, tmp, incoming, tl_requests) { @@ -1176,19 +1176,19 @@ static void submit_fast_path(struct drbd_device *mdev, struct list_head *incomin if (rw == WRITE /* rw != WRITE should not even end up here! 
*/ && req->private_bio && req->i.size - && !test_bit(AL_SUSPENDED, &mdev->flags)) { - if (!drbd_al_begin_io_fastpath(mdev, &req->i)) + && !test_bit(AL_SUSPENDED, &device->flags)) { + if (!drbd_al_begin_io_fastpath(device, &req->i)) continue; req->rq_state |= RQ_IN_ACT_LOG; } list_del_init(&req->tl_requests); - drbd_send_and_submit(mdev, req); + drbd_send_and_submit(device, req); } } -static bool prepare_al_transaction_nonblock(struct drbd_device *mdev, +static bool prepare_al_transaction_nonblock(struct drbd_device *device, struct list_head *incoming, struct list_head *pending) { @@ -1196,9 +1196,9 @@ static bool prepare_al_transaction_nonblock(struct drbd_device *mdev, int wake = 0; int err; - spin_lock_irq(&mdev->al_lock); + spin_lock_irq(&device->al_lock); list_for_each_entry_safe(req, tmp, incoming, tl_requests) { - err = drbd_al_begin_io_nonblock(mdev, &req->i); + err = drbd_al_begin_io_nonblock(device, &req->i); if (err == -EBUSY) wake = 1; if (err) @@ -1206,30 +1206,30 @@ static bool prepare_al_transaction_nonblock(struct drbd_device *mdev, req->rq_state |= RQ_IN_ACT_LOG; list_move_tail(&req->tl_requests, pending); } - spin_unlock_irq(&mdev->al_lock); + spin_unlock_irq(&device->al_lock); if (wake) - wake_up(&mdev->al_wait); + wake_up(&device->al_wait); return !list_empty(pending); } void do_submit(struct work_struct *ws) { - struct drbd_device *mdev = container_of(ws, struct drbd_device, submit.worker); + struct drbd_device *device = container_of(ws, struct drbd_device, submit.worker); LIST_HEAD(incoming); LIST_HEAD(pending); struct drbd_request *req, *tmp; for (;;) { - spin_lock(&mdev->submit.lock); - list_splice_tail_init(&mdev->submit.writes, &incoming); - spin_unlock(&mdev->submit.lock); + spin_lock(&device->submit.lock); + list_splice_tail_init(&device->submit.writes, &incoming); + spin_unlock(&device->submit.lock); - submit_fast_path(mdev, &incoming); + submit_fast_path(device, &incoming); if (list_empty(&incoming)) break; - wait_event(mdev->al_wait, prepare_al_transaction_nonblock(mdev, &incoming, &pending)); + wait_event(device->al_wait, prepare_al_transaction_nonblock(device, &incoming, &pending)); /* Maybe more was queued, while we prepared the transaction? * Try to stuff them into this transaction as well. 
* Be strictly non-blocking here, no wait_event, we already @@ -1243,17 +1243,17 @@ void do_submit(struct work_struct *ws) /* It is ok to look outside the lock, * it's only an optimization anyways */ - if (list_empty(&mdev->submit.writes)) + if (list_empty(&device->submit.writes)) break; - spin_lock(&mdev->submit.lock); - list_splice_tail_init(&mdev->submit.writes, &more_incoming); - spin_unlock(&mdev->submit.lock); + spin_lock(&device->submit.lock); + list_splice_tail_init(&device->submit.writes, &more_incoming); + spin_unlock(&device->submit.lock); if (list_empty(&more_incoming)) break; - made_progress = prepare_al_transaction_nonblock(mdev, &more_incoming, &more_pending); + made_progress = prepare_al_transaction_nonblock(device, &more_incoming, &more_pending); list_splice_tail_init(&more_pending, &pending); list_splice_tail_init(&more_incoming, &incoming); @@ -1261,18 +1261,18 @@ void do_submit(struct work_struct *ws) if (!made_progress) break; } - drbd_al_begin_io_commit(mdev, false); + drbd_al_begin_io_commit(device, false); list_for_each_entry_safe(req, tmp, &pending, tl_requests) { list_del_init(&req->tl_requests); - drbd_send_and_submit(mdev, req); + drbd_send_and_submit(device, req); } } } void drbd_make_request(struct request_queue *q, struct bio *bio) { - struct drbd_device *mdev = (struct drbd_device *) q->queuedata; + struct drbd_device *device = (struct drbd_device *) q->queuedata; unsigned long start_time; start_time = jiffies; @@ -1282,8 +1282,8 @@ void drbd_make_request(struct request_queue *q, struct bio *bio) */ D_ASSERT(IS_ALIGNED(bio->bi_iter.bi_size, 512)); - inc_ap_bio(mdev); - __drbd_make_request(mdev, bio, start_time); + inc_ap_bio(device); + __drbd_make_request(device, bio, start_time); } /* This is called by bio_add_page(). 
@@ -1300,20 +1300,20 @@ void drbd_make_request(struct request_queue *q, struct bio *bio) */ int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct bio_vec *bvec) { - struct drbd_device *mdev = (struct drbd_device *) q->queuedata; + struct drbd_device *device = (struct drbd_device *) q->queuedata; unsigned int bio_size = bvm->bi_size; int limit = DRBD_MAX_BIO_SIZE; int backing_limit; - if (bio_size && get_ldev(mdev)) { + if (bio_size && get_ldev(device)) { unsigned int max_hw_sectors = queue_max_hw_sectors(q); struct request_queue * const b = - mdev->ldev->backing_bdev->bd_disk->queue; + device->ldev->backing_bdev->bd_disk->queue; if (b->merge_bvec_fn) { backing_limit = b->merge_bvec_fn(b, bvm, bvec); limit = min(limit, backing_limit); } - put_ldev(mdev); + put_ldev(device); if ((limit >> 9) > max_hw_sectors) limit = max_hw_sectors << 9; } @@ -1334,8 +1334,8 @@ static struct drbd_request *find_oldest_request(struct drbd_tconn *tconn) void request_timer_fn(unsigned long data) { - struct drbd_device *mdev = (struct drbd_device *) data; - struct drbd_tconn *tconn = mdev->tconn; + struct drbd_device *device = (struct drbd_device *) data; + struct drbd_tconn *tconn = device->tconn; struct drbd_request *req; /* oldest request */ struct net_conf *nc; unsigned long ent = 0, dt = 0, et, nt; /* effective timeout = ko_count * timeout */ @@ -1343,12 +1343,12 @@ void request_timer_fn(unsigned long data) rcu_read_lock(); nc = rcu_dereference(tconn->net_conf); - if (nc && mdev->state.conn >= C_WF_REPORT_PARAMS) + if (nc && device->state.conn >= C_WF_REPORT_PARAMS) ent = nc->timeout * HZ/10 * nc->ko_count; - if (get_ldev(mdev)) { /* implicit state.disk >= D_INCONSISTENT */ - dt = rcu_dereference(mdev->ldev->disk_conf)->disk_timeout * HZ / 10; - put_ldev(mdev); + if (get_ldev(device)) { /* implicit state.disk >= D_INCONSISTENT */ + dt = rcu_dereference(device->ldev->disk_conf)->disk_timeout * HZ / 10; + put_ldev(device); } rcu_read_unlock(); @@ -1363,7 +1363,7 @@ void request_timer_fn(unsigned long data) req = find_oldest_request(tconn); if (!req) { spin_unlock_irq(&tconn->req_lock); - mod_timer(&mdev->request_timer, now + et); + mod_timer(&device->request_timer, now + et); return; } @@ -1387,15 +1387,15 @@ void request_timer_fn(unsigned long data) time_after(now, req->start_time + ent) && !time_in_range(now, tconn->last_reconnect_jif, tconn->last_reconnect_jif + ent)) { dev_warn(DEV, "Remote failed to finish a request within ko-count * timeout\n"); - _drbd_set_state(_NS(mdev, conn, C_TIMEOUT), CS_VERBOSE | CS_HARD, NULL); + _drbd_set_state(_NS(device, conn, C_TIMEOUT), CS_VERBOSE | CS_HARD, NULL); } - if (dt && req->rq_state & RQ_LOCAL_PENDING && req->w.mdev == mdev && + if (dt && req->rq_state & RQ_LOCAL_PENDING && req->w.device == device && time_after(now, req->start_time + dt) && - !time_in_range(now, mdev->last_reattach_jif, mdev->last_reattach_jif + dt)) { + !time_in_range(now, device->last_reattach_jif, device->last_reattach_jif + dt)) { dev_warn(DEV, "Local backing device failed to meet the disk-timeout\n"); - __drbd_chk_io_error(mdev, DRBD_FORCE_DETACH); + __drbd_chk_io_error(device, DRBD_FORCE_DETACH); } nt = (time_after(now, req->start_time + et) ? now : req->start_time) + et; spin_unlock_irq(&tconn->req_lock); - mod_timer(&mdev->request_timer, nt); + mod_timer(&device->request_timer, nt); } |