Diffstat (limited to 'block')
-rw-r--r--  block/blk-core.c    | 36
-rw-r--r--  block/elevator.c    |  4
-rw-r--r--  block/row-iosched.c | 12
3 files changed, 37 insertions(+), 15 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index 14a419c50..40cb3916c 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -307,16 +307,20 @@ inline void __blk_run_queue_uncond(struct request_queue *q)
* number of active request_fn invocations such that blk_drain_queue()
* can wait until all these request_fn calls have finished.
*/
- q->request_fn_active++;
+
if (!q->notified_urgent &&
q->elevator->type->ops.elevator_is_urgent_fn &&
q->urgent_request_fn &&
q->elevator->type->ops.elevator_is_urgent_fn(q)) {
q->notified_urgent = true;
+ q->request_fn_active++;
q->urgent_request_fn(q);
- } else
+ q->request_fn_active--;
+ } else {
+ q->request_fn_active++;
q->request_fn(q);
- q->request_fn_active--;
+ q->request_fn_active--;
+ }
}
/**
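[Note: after this hunk, each branch brackets exactly the dispatch callback it invokes, so a drain loop waiting for request_fn_active == 0 still covers every in-flight invocation, urgent or not. A minimal userspace sketch of that invariant; the struct and callbacks below are simplified stand-ins for struct request_queue and its function pointers, not kernel code.]

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for struct request_queue. */
struct queue {
	int  request_fn_active;   /* dispatch callbacks currently running */
	bool notified_urgent;     /* an urgent dispatch is outstanding */
	bool has_urgent;          /* stand-in for elevator_is_urgent_fn(q) */
	void (*urgent_request_fn)(struct queue *q);
	void (*request_fn)(struct queue *q);
};

/* Mirrors the patched __blk_run_queue_uncond(): the counter is bumped
 * only around the callback that actually runs. */
static void run_queue_uncond(struct queue *q)
{
	if (!q->notified_urgent && q->has_urgent && q->urgent_request_fn) {
		q->notified_urgent = true;
		q->request_fn_active++;
		q->urgent_request_fn(q);
		q->request_fn_active--;
	} else {
		q->request_fn_active++;
		q->request_fn(q);
		q->request_fn_active--;
	}
}

static void normal_fn(struct queue *q) { printf("normal, active=%d\n", q->request_fn_active); }
static void urgent_fn(struct queue *q) { printf("urgent, active=%d\n", q->request_fn_active); }

int main(void)
{
	struct queue q = { .has_urgent = true,
			   .urgent_request_fn = urgent_fn, .request_fn = normal_fn };
	run_queue_uncond(&q);   /* takes the urgent branch */
	run_queue_uncond(&q);   /* notified_urgent now set: normal branch */
	printf("drained, active=%d\n", q.request_fn_active);
	return 0;
}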
@@ -1226,6 +1230,16 @@ void blk_requeue_request(struct request_queue *q, struct request *rq)
BUG_ON(blk_queued_rq(rq));
+ if (rq->cmd_flags & REQ_URGENT) {
+ /*
+ * It's not compliant with the design to re-insert
+ * urgent requests. We want to be able to track this
+ * down.
+ */
+ pr_err("%s(): requeueing an URGENT request", __func__);
+ WARN_ON(!q->dispatched_urgent);
+ q->dispatched_urgent = false;
+ }
elv_requeue_request(q, rq);
}
EXPORT_SYMBOL(blk_requeue_request);
@@ -1249,10 +1263,20 @@ int blk_reinsert_request(struct request_queue *q, struct request *rq)
blk_clear_rq_complete(rq);
trace_block_rq_requeue(q, rq);
- if (blk_rq_tagged(rq))
+ if (rq->cmd_flags & REQ_QUEUED)
blk_queue_end_tag(q, rq);
BUG_ON(blk_queued_rq(rq));
+ if (rq->cmd_flags & REQ_URGENT) {
+ /*
+ * It's not compliant with the design to re-insert
+ * urgent requests. We want to be able to track this
+ * down.
+ */
+ pr_err("%s(): reinserting an URGENT request", __func__);
+ WARN_ON(!q->dispatched_urgent);
+ q->dispatched_urgent = false;
+ }
return elv_reinsert_request(q, rq);
}
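[Note: the two hunks above share one pattern: an urgent request coming back through requeue or reinsert is outside the design, so it is logged, a warning fires if the tracking is already inconsistent, and dispatched_urgent is cleared so the next urgent dispatch is not blocked. A compressed userspace sketch of that pattern; the types, the flag value, and note_urgent_comeback() are illustrative stand-ins, not the kernel's API.]

#include <stdbool.h>
#include <stdio.h>

#define REQ_URGENT (1u << 0)   /* illustrative stand-in for the kernel flag */

struct queue   { bool dispatched_urgent; };
struct request { unsigned int cmd_flags; };

/* Log the unexpected comeback, warn on inconsistent tracking, and free
 * the urgent slot so future urgent dispatches can proceed. */
static void note_urgent_comeback(struct queue *q, const struct request *rq,
				 const char *verb)
{
	if (rq->cmd_flags & REQ_URGENT) {
		fprintf(stderr, "%s(): %s an URGENT request\n", __func__, verb);
		if (!q->dispatched_urgent)
			fprintf(stderr, "WARN: dispatched_urgent already clear\n");
		q->dispatched_urgent = false;
	}
}

int main(void)
{
	struct queue q = { .dispatched_urgent = true };
	struct request rq = { .cmd_flags = REQ_URGENT };
	note_urgent_comeback(&q, &rq, "requeueing");
	return 0;
}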
@@ -2226,6 +2250,10 @@ struct request *blk_peek_request(struct request_queue *q)
* not be passed by new incoming requests
*/
rq->cmd_flags |= REQ_STARTED;
+ if (rq->cmd_flags & REQ_URGENT) {
+ WARN_ON(q->dispatched_urgent);
+ q->dispatched_urgent = true;
+ }
trace_block_rq_issue(q, rq);
}
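[Note: blk_peek_request() now opens the tracking window that elv_completed_request() in elevator.c below closes, so at most one urgent request is considered in flight at a time. A sketch of that pairing, with assert() standing in for WARN_ON() and simplified types; the flag value is illustrative.]

#include <assert.h>
#include <stdbool.h>

#define REQ_URGENT (1u << 0)   /* illustrative bit, not the kernel value */

struct queue { bool dispatched_urgent; };

/* blk_peek_request() side: mark the single urgent slot busy. */
static void urgent_issued(struct queue *q, unsigned int cmd_flags)
{
	if (cmd_flags & REQ_URGENT) {
		assert(!q->dispatched_urgent);   /* WARN_ON(q->dispatched_urgent) */
		q->dispatched_urgent = true;
	}
}

/* elv_completed_request() side: completion frees the slot again. */
static void urgent_completed(struct queue *q, unsigned int cmd_flags)
{
	if (cmd_flags & REQ_URGENT) {
		assert(q->dispatched_urgent);    /* WARN_ON(!q->dispatched_urgent) */
		q->dispatched_urgent = false;
	}
}

int main(void)
{
	struct queue q = { .dispatched_urgent = false };
	urgent_issued(&q, REQ_URGENT);       /* peek: slot taken */
	urgent_completed(&q, REQ_URGENT);    /* completion: slot free */
	return 0;
}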
diff --git a/block/elevator.c b/block/elevator.c
index 445718cc6..a4d6e54fe 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -786,10 +786,10 @@ void elv_completed_request(struct request_queue *q, struct request *rq)
{
struct elevator_queue *e = q->elevator;
- if (test_bit(REQ_ATOM_URGENT, &rq->atomic_flags)) {
+ if (rq->cmd_flags & REQ_URGENT) {
q->notified_urgent = false;
+ WARN_ON(!q->dispatched_urgent);
q->dispatched_urgent = false;
- blk_clear_rq_urgent(rq);
}
/*
* request is released from the driver, io must be done
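[Note: urgency used to be a REQ_ATOM_URGENT bit in rq->atomic_flags, set at dispatch and cleared here via blk_clear_rq_urgent(); testing REQ_URGENT in cmd_flags instead treats urgency as a property of the command itself, so the completion path only resets the per-queue bookkeeping. A small sketch of the difference; the bit positions below are illustrative, not the kernel's values.]

#include <stdbool.h>
#include <stdio.h>

#define REQ_URGENT      (1u << 5)   /* illustrative bit positions, */
#define REQ_ATOM_URGENT 3           /* not the kernel's values     */

struct request {
	unsigned int  cmd_flags;     /* set at submission, read-mostly */
	unsigned long atomic_flags;  /* lifetime bits flipped at runtime */
};

/* Before: an atomic lifetime bit, set on dispatch, cleared on completion. */
static bool is_urgent_atomic(const struct request *rq)
{
	return rq->atomic_flags & (1UL << REQ_ATOM_URGENT);
}

/* After: urgency travels with the command flags, no set/clear pairing. */
static bool is_urgent(const struct request *rq)
{
	return rq->cmd_flags & REQ_URGENT;
}

int main(void)
{
	struct request rq = { .cmd_flags = REQ_URGENT };
	printf("atomic=%d cmd=%d\n", is_urgent_atomic(&rq), is_urgent(&rq));
	return 0;
}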
diff --git a/block/row-iosched.c b/block/row-iosched.c
index 666f4db7f..9d4ee917e 100644
--- a/block/row-iosched.c
+++ b/block/row-iosched.c
@@ -1,7 +1,7 @@
/*
* ROW (Read Over Write) I/O scheduler.
*
- * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -331,10 +331,6 @@ static void row_add_request(struct request_queue *q,
struct row_queue *rqueue = RQ_ROWQ(rq);
s64 diff_ms;
bool queue_was_empty = list_empty(&rqueue->fifo);
- unsigned long bv_page_flags = 0;
-
- if (rq->bio && rq->bio->bi_io_vec && rq->bio->bi_io_vec->bv_page)
- bv_page_flags = rq->bio->bi_io_vec->bv_page->flags;
list_add_tail(&rq->queuelist, &rqueue->fifo);
rd->nr_reqs[rq_data_dir(rq)]++;
@@ -367,9 +363,7 @@ static void row_add_request(struct request_queue *q,
rqueue->idle_data.begin_idling = false;
return;
}
-
- if ((bv_page_flags & (1L << PG_readahead)) ||
- (diff_ms < rd->rd_idle_data.freq_ms)) {
+ if (diff_ms < rd->rd_idle_data.freq_ms) {
rqueue->idle_data.begin_idling = true;
row_log_rowq(rd, rqueue->prio, "Enable idling");
} else {
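[Note: with the PG_readahead page-flag hint removed above, the decision to idle on a read queue rests only on how recently the previous request arrived. A one-function sketch of the remaining heuristic; the struct is a simplified slice of the patch's struct row_data, and the 10 ms value is only an example.]

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified slice of the scheduler's idle parameters. */
struct rd_idle_data {
	int64_t freq_ms;   /* max gap between reads that still counts as a stream */
};

/* Idle (hold the dispatch queue for this reader) only when this request
 * arrived within freq_ms of the previous one, i.e. the queue looks like
 * a steady stream of reads. */
static bool should_begin_idling(int64_t diff_ms, const struct rd_idle_data *d)
{
	return diff_ms < d->freq_ms;
}

int main(void)
{
	struct rd_idle_data d = { .freq_ms = 10 };
	printf("gap 4ms  -> idle=%d\n", should_begin_idling(4, &d));
	printf("gap 25ms -> idle=%d\n", should_begin_idling(25, &d));
	return 0;
}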
@@ -806,7 +800,6 @@ static int row_init_queue(struct request_queue *q, struct elevator_type *e)
return -ENOMEM;
}
eq->elevator_data = rdata;
-
memset(rdata, 0, sizeof(*rdata));
for (i = 0; i < ROWQ_MAX_PRIO; i++) {
INIT_LIST_HEAD(&rdata->row_queues[i].fifo);
@@ -837,6 +830,7 @@ static int row_init_queue(struct request_queue *q, struct elevator_type *e)
rdata->last_served_ioprio_class = IOPRIO_CLASS_NONE;
rdata->rd_idle_data.idling_queue_idx = ROWQ_MAX_PRIO;
rdata->dispatch_queue = q;
+
spin_lock_irq(q->queue_lock);
q->elevator = eq;
spin_unlock_irq(q->queue_lock);