aboutsummaryrefslogtreecommitdiff
path: root/block
diff options
context:
space:
mode:
Diffstat (limited to 'block')
-rw-r--r--block/blk-core.c26
-rw-r--r--block/blk-settings.c12
-rw-r--r--block/blk.h11
-rw-r--r--block/elevator.c5
4 files changed, 52 insertions, 2 deletions
diff --git a/block/blk-core.c b/block/blk-core.c
index 570819b04..9f671cd6c 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -312,7 +312,14 @@ inline void __blk_run_queue_uncond(struct request_queue *q)
* can wait until all these request_fn calls have finished.
*/
q->request_fn_active++;
- q->request_fn(q);
+ if (!q->notified_urgent &&
+ q->elevator->type->ops.elevator_is_urgent_fn &&
+ q->urgent_request_fn &&
+ q->elevator->type->ops.elevator_is_urgent_fn(q)) {
+ q->notified_urgent = true;
+ q->urgent_request_fn(q);
+ } else
+ q->request_fn(q);
q->request_fn_active--;
}
@@ -323,6 +330,12 @@ inline void __blk_run_queue_uncond(struct request_queue *q)
* Description:
* See @blk_run_queue. This variant must be called with the queue lock
* held and interrupts disabled.
+ * Device driver will be notified of an urgent request
+ * pending under the following conditions:
+ * 1. The driver and the current scheduler support urgent request handling
+ * 2. There is an urgent request pending in the scheduler
+ * 3. There isn't already an urgent request in flight, meaning the previously
+ * notified urgent request has completed (!q->notified_urgent)
*/
void __blk_run_queue(struct request_queue *q)
{
@@ -2351,8 +2364,17 @@ struct request *blk_fetch_request(struct request_queue *q)
struct request *rq;
rq = blk_peek_request(q);
- if (rq)
+ if (rq) {
+ /*
+ * Assumption: the next request fetched from scheduler after we
+ * notified "urgent request pending" - will be the urgent one
+ */
+ if (q->notified_urgent && !q->dispatched_urgent) {
+ q->dispatched_urgent = true;
+ (void)blk_mark_rq_urgent(rq);
+ }
blk_start_request(rq);
+ }
return rq;
}
EXPORT_SYMBOL(blk_fetch_request);
diff --git a/block/blk-settings.c b/block/blk-settings.c
index ec00a0f75..f19c2bce6 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -100,6 +100,18 @@ void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn)
EXPORT_SYMBOL_GPL(blk_queue_lld_busy);
/**
+ * blk_urgent_request() - Set an urgent_request handler function for queue
+ * @q: queue
+ * @fn: handler for urgent requests
+ *
+ */
+void blk_urgent_request(struct request_queue *q, request_fn_proc *fn)
+{
+ q->urgent_request_fn = fn;
+}
+EXPORT_SYMBOL(blk_urgent_request);
+
+/**
* blk_set_default_limits - reset limits to default values
* @lim: the queue_limits structure to reset
*
diff --git a/block/blk.h b/block/blk.h
index b3bdeb36f..f1f44408b 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -42,6 +42,7 @@ void blk_add_timer(struct request *);
*/
enum rq_atomic_flags {
REQ_ATOM_COMPLETE = 0,
+ REQ_ATOM_URGENT = 1,
};
/*
@@ -58,6 +59,16 @@ static inline void blk_clear_rq_complete(struct request *rq)
clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
}
+static inline int blk_mark_rq_urgent(struct request *rq)
+{
+ return test_and_set_bit(REQ_ATOM_URGENT, &rq->atomic_flags);
+}
+
+static inline void blk_clear_rq_urgent(struct request *rq)
+{
+ clear_bit(REQ_ATOM_URGENT, &rq->atomic_flags);
+}
+
/*
* Internal elevator interface
*/
diff --git a/block/elevator.c b/block/elevator.c
index 3d1c3a729..3bfb00dbb 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -786,6 +786,11 @@ void elv_completed_request(struct request_queue *q, struct request *rq)
{
struct elevator_queue *e = q->elevator;
+ if (test_bit(REQ_ATOM_URGENT, &rq->atomic_flags)) {
+ q->notified_urgent = false;
+ q->dispatched_urgent = false;
+ blk_clear_rq_urgent(rq);
+ }
/*
* request is released from the driver, io must be done
*/