To allow filtering of I/O based on properties of the request other
than data direction, allow the bucket function to return an int.
If the bucket callback returns a negative value, do not count the
request in the stats accumulation.
Signed-off-by: Stephen Bates <[email protected]>
Fixed up Kyber scheduler stat callback.
Signed-off-by: Jens Axboe <[email protected]>
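As a hedged illustration of the new semantics (not part of this patch):
with the int return type, a bucket callback can filter requests by
returning a negative value. The sketch below is hypothetical; the name
example_rw_bucket is invented, and it assumes the usual req_op() and
rq_data_dir() helpers.

    /*
     * Hypothetical bucket function: account only reads and writes,
     * one bucket per data direction; return a negative value so all
     * other requests (e.g. discards, flushes) are skipped.
     */
    static int example_rw_bucket(const struct request *rq)
    {
            if (req_op(rq) != REQ_OP_READ && req_op(rq) != REQ_OP_WRITE)
                    return -1;      /* negative: do not account */

            return rq_data_dir(rq); /* 0 == READ, 1 == WRITE */
    }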
bool enable_accounting;
};
-unsigned int blk_stat_rq_ddir(const struct request *rq)
+int blk_stat_rq_ddir(const struct request *rq)
{
return rq_data_dir(rq);
}
list_for_each_entry_rcu(cb, &q->stats->callbacks, list) {
if (blk_stat_is_active(cb)) {
bucket = cb->bucket_fn(rq);
+ if (bucket < 0)
+ continue;
stat = &this_cpu_ptr(cb->cpu_stat)[bucket];
__blk_stat_add(stat, value);
}
struct blk_stat_callback *
blk_stat_alloc_callback(void (*timer_fn)(struct blk_stat_callback *),
- unsigned int (*bucket_fn)(const struct request *),
+ int (*bucket_fn)(const struct request *),
unsigned int buckets, void *data)
{
struct blk_stat_callback *cb;
/**
* @bucket_fn: Given a request, returns which statistics bucket it
- * should be accounted under.
+ * should be accounted under. Return a negative value if the
+ * request should not be accounted.
*/
- unsigned int (*bucket_fn)(const struct request *);
+ int (*bucket_fn)(const struct request *);
/**
* @buckets: Number of statistics buckets.
*
* Return: Data direction of the request, either READ or WRITE.
*/
-unsigned int blk_stat_rq_ddir(const struct request *rq);
+int blk_stat_rq_ddir(const struct request *rq);
/**
* blk_stat_alloc_callback() - Allocate a block statistics callback.
*/
struct blk_stat_callback *
blk_stat_alloc_callback(void (*timer_fn)(struct blk_stat_callback *),
- unsigned int (*bucket_fn)(const struct request *),
+ int (*bucket_fn)(const struct request *),
unsigned int buckets, void *data);
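For context, a minimal, hypothetical call site for the updated
prototype might look as follows; example_timer_fn and the two-bucket
layout are assumptions for illustration, not taken from this patch.

    /*
     * Hypothetical allocation: two buckets (READ/WRITE), using the
     * int-returning bucket function sketched above.
     */
    cb = blk_stat_alloc_callback(example_timer_fn, example_rw_bucket,
                                 2, NULL);
    if (!cb)
            return -ENOMEM;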
/**
atomic_t wait_index[KYBER_NUM_DOMAINS];
};
-static unsigned int rq_sched_domain(const struct request *rq)
+static int rq_sched_domain(const struct request *rq)
{
unsigned int op = rq->cmd_flags;