commit 6d2b84a4e5
Merge tag 'sched-fifo-2020-08-04' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull sched/fifo updates from Ingo Molnar:
 "This adds the sched_set_fifo*() encapsulation APIs to remove static
  priority level knowledge from non-scheduler code.

  The three APIs for non-scheduler code to set SCHED_FIFO are:

   - sched_set_fifo()
   - sched_set_fifo_low()
   - sched_set_normal()

  These provide two FIFO priority levels: default (high), and a 'low'
  priority level, plus sched_set_normal() to set the policy back to
  non-SCHED_FIFO.

  Since the changes affect a lot of non-scheduler code, we kept this in
  a separate tree.

  When merging to the latest upstream tree there's a conflict in
  drivers/spi/spi.c, which can be resolved via:

	sched_set_fifo(ctlr->kworker_task);"

Signed-off-by: Ingo Molnar <mingo@kernel.org>

* tag 'sched-fifo-2020-08-04' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (24 commits)
  sched,tracing: Convert to sched_set_fifo()
  sched: Remove sched_set_*() return value
  sched: Remove sched_setscheduler*() EXPORTs
  sched,psi: Convert to sched_set_fifo_low()
  sched,rcutorture: Convert to sched_set_fifo_low()
  sched,rcuperf: Convert to sched_set_fifo_low()
  sched,locktorture: Convert to sched_set_fifo()
  sched,irq: Convert to sched_set_fifo()
  sched,watchdog: Convert to sched_set_fifo()
  sched,serial: Convert to sched_set_fifo()
  sched,powerclamp: Convert to sched_set_fifo()
  sched,ion: Convert to sched_set_normal()
  sched,powercap: Convert to sched_set_fifo*()
  sched,spi: Convert to sched_set_fifo*()
  sched,mmc: Convert to sched_set_fifo*()
  sched,ivtv: Convert to sched_set_fifo*()
  sched,drm/scheduler: Convert to sched_set_fifo*()
  sched,msm: Convert to sched_set_fifo*()
  sched,psci: Convert to sched_set_fifo*()
  sched,drbd: Convert to sched_set_fifo*()
  ...
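To make the conversion pattern concrete before the individual diffs, here is a minimal sketch of a kthread moving from an open-coded static priority to the new API. This is illustrative only; the demo_* names are hypothetical and not part of this series:

	#include <linux/err.h>
	#include <linux/init.h>
	#include <linux/kthread.h>
	#include <linux/sched.h>

	static struct task_struct *demo_task;

	static int demo_thread(void *data)
	{
		/*
		 * Old style: the driver picked a static priority itself,
		 *
		 *	struct sched_param param = { .sched_priority = 1 };
		 *	sched_setscheduler_nocheck(current, SCHED_FIFO, &param);
		 *
		 * New style: state the intent, let the scheduler pick the level.
		 */
		sched_set_fifo_low(current);

		while (!kthread_should_stop())
			schedule_timeout_interruptible(HZ);

		return 0;
	}

	static int __init demo_init(void)
	{
		demo_task = kthread_run(demo_thread, NULL, "demo");
		return PTR_ERR_OR_ZERO(demo_task);
	}

sched_set_fifo_low() matches the conversions below for background-ish RT threads (acpi_pad, psi, rcutorture); threads that need the default kernel RT level use sched_set_fifo() instead.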
diff --git a/arch/arm/common/bL_switcher.c b/arch/arm/common/bL_switcher.c
@@ -270,12 +270,11 @@ static struct bL_thread bL_threads[NR_CPUS];
 static int bL_switcher_thread(void *arg)
 {
 	struct bL_thread *t = arg;
-	struct sched_param param = { .sched_priority = 1 };
 	int cluster;
 	bL_switch_completion_handler completer;
 	void *completer_cookie;
 
-	sched_setscheduler_nocheck(current, SCHED_FIFO, &param);
+	sched_set_fifo_low(current);
 	complete(&t->started);
 
 	do {
diff --git a/crypto/crypto_engine.c b/crypto/crypto_engine.c
@@ -482,7 +482,6 @@ struct crypto_engine *crypto_engine_alloc_init_and_set(struct device *dev,
 						       int (*cbk_do_batch)(struct crypto_engine *engine),
 						       bool rt, int qlen)
 {
-	struct sched_param param = { .sched_priority = MAX_RT_PRIO / 2 };
 	struct crypto_engine *engine;
 
 	if (!dev)
@@ -520,7 +519,7 @@ struct crypto_engine *crypto_engine_alloc_init_and_set(struct device *dev,
 
 	if (engine->rt) {
 		dev_info(dev, "will run requests pump with realtime priority\n");
-		sched_setscheduler(engine->kworker->task, SCHED_FIFO, &param);
+		sched_set_fifo(engine->kworker->task);
 	}
 
 	return engine;
diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c
@@ -136,12 +136,11 @@ static unsigned int idle_pct = 5; /* percentage */
 static unsigned int round_robin_time = 1; /* second */
 static int power_saving_thread(void *data)
 {
-	struct sched_param param = {.sched_priority = 1};
 	int do_sleep;
 	unsigned int tsk_index = (unsigned long)data;
 	u64 last_jiffies = 0;
 
-	sched_setscheduler(current, SCHED_RR, &param);
+	sched_set_fifo_low(current);
 
 	while (!kthread_should_stop()) {
 		unsigned long expire_time;
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
@@ -6019,11 +6019,8 @@ int drbd_ack_receiver(struct drbd_thread *thi)
 	unsigned int header_size = drbd_header_size(connection);
 	int expect = header_size;
 	bool ping_timeout_active = false;
-	struct sched_param param = { .sched_priority = 2 };
 
-	rv = sched_setscheduler(current, SCHED_RR, &param);
-	if (rv < 0)
-		drbd_err(connection, "drbd_ack_receiver: ERROR set priority, ret=%d\n", rv);
+	sched_set_fifo_low(current);
 
 	while (get_t_state(thi) == RUNNING) {
 		drbd_thread_current_set_cpu(thi);
diff --git a/drivers/firmware/psci/psci_checker.c b/drivers/firmware/psci/psci_checker.c
@@ -274,7 +274,6 @@ static int suspend_test_thread(void *arg)
 {
 	int cpu = (long)arg;
 	int i, nb_suspend = 0, nb_shallow_sleep = 0, nb_err = 0;
-	struct sched_param sched_priority = { .sched_priority = MAX_RT_PRIO-1 };
 	struct cpuidle_device *dev;
 	struct cpuidle_driver *drv;
 	/* No need for an actual callback, we just want to wake up the CPU. */
@@ -284,9 +283,7 @@ static int suspend_test_thread(void *arg)
 	wait_for_completion(&suspend_threads_started);
 
 	/* Set maximum priority to preempt all other threads on this CPU. */
-	if (sched_setscheduler_nocheck(current, SCHED_FIFO, &sched_priority))
-		pr_warn("Failed to set suspend thread scheduler on CPU %d\n",
-			cpu);
+	sched_set_fifo(current);
 
 	dev = this_cpu_read(cpuidle_devices);
 	drv = cpuidle_get_cpu_driver(dev);
@@ -351,11 +348,6 @@ static int suspend_test_thread(void *arg)
 	if (atomic_dec_return_relaxed(&nb_active_threads) == 0)
 		complete(&suspend_threads_done);
 
-	/* Give up on RT scheduling and wait for termination. */
-	sched_priority.sched_priority = 0;
-	if (sched_setscheduler_nocheck(current, SCHED_NORMAL, &sched_priority))
-		pr_warn("Failed to set suspend thread scheduler on CPU %d\n",
-			cpu);
 	for (;;) {
 		/* Needs to be set first to avoid missing a wakeup. */
 		set_current_state(TASK_INTERRUPTIBLE);
diff --git a/drivers/gpu/drm/drm_vblank_work.c b/drivers/gpu/drm/drm_vblank_work.c
@@ -248,9 +248,6 @@ EXPORT_SYMBOL(drm_vblank_work_init);
 
 int drm_vblank_worker_init(struct drm_vblank_crtc *vblank)
 {
-	struct sched_param param = {
-		.sched_priority = MAX_RT_PRIO - 1,
-	};
 	struct kthread_worker *worker;
 
 	INIT_LIST_HEAD(&vblank->pending_work);
@@ -263,5 +260,6 @@ int drm_vblank_worker_init(struct drm_vblank_crtc *vblank)
 
 	vblank->worker = worker;
 
-	return sched_setscheduler(vblank->worker->task, SCHED_FIFO, &param);
+	sched_set_fifo(worker->task);
+	return 0;
 }
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
@@ -401,7 +401,6 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
 	struct msm_kms *kms;
 	struct msm_mdss *mdss;
 	int ret, i;
-	struct sched_param param;
 
 	ddev = drm_dev_alloc(drv, dev);
 	if (IS_ERR(ddev)) {
@@ -507,12 +506,6 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
 	ddev->mode_config.funcs = &mode_config_funcs;
 	ddev->mode_config.helper_private = &mode_config_helper_funcs;
 
-	/**
-	 * this priority was found during empiric testing to have appropriate
-	 * realtime scheduling to process display updates and interact with
-	 * other real time and normal priority task
-	 */
-	param.sched_priority = 16;
 	for (i = 0; i < priv->num_crtcs; i++) {
 		/* initialize event thread */
 		priv->event_thread[i].crtc_id = priv->crtcs[i]->base.id;
@@ -524,11 +517,7 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
 			goto err_msm_uninit;
 		}
 
-		ret = sched_setscheduler(priv->event_thread[i].worker->task,
-					 SCHED_FIFO, &param);
-		if (ret)
-			dev_warn(dev, "event_thread set priority failed:%d\n",
-				 ret);
+		sched_set_fifo(priv->event_thread[i].worker->task);
 	}
 
 	ret = drm_vblank_init(ddev, priv->num_crtcs);
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
@@ -762,11 +762,10 @@ static bool drm_sched_blocked(struct drm_gpu_scheduler *sched)
  */
 static int drm_sched_main(void *param)
 {
-	struct sched_param sparam = {.sched_priority = 1};
 	struct drm_gpu_scheduler *sched = (struct drm_gpu_scheduler *)param;
 	int r;
 
-	sched_setscheduler(current, SCHED_FIFO, &sparam);
+	sched_set_fifo_low(current);
 
 	while (!kthread_should_stop()) {
 		struct drm_sched_entity *entity = NULL;
diff --git a/drivers/media/pci/ivtv/ivtv-driver.c b/drivers/media/pci/ivtv/ivtv-driver.c
@@ -737,8 +737,6 @@ static void ivtv_process_options(struct ivtv *itv)
  */
 static int ivtv_init_struct1(struct ivtv *itv)
 {
-	struct sched_param param = { .sched_priority = 99 };
-
 	itv->base_addr = pci_resource_start(itv->pdev, 0);
 	itv->enc_mbox.max_mbox = 2; /* the encoder has 3 mailboxes (0-2) */
 	itv->dec_mbox.max_mbox = 1; /* the decoder has 2 mailboxes (0-1) */
@@ -758,7 +756,7 @@ static int ivtv_init_struct1(struct ivtv *itv)
 		return -1;
 	}
 	/* must use the FIFO scheduler as it is realtime sensitive */
-	sched_setscheduler(itv->irq_worker_task, SCHED_FIFO, &param);
+	sched_set_fifo(itv->irq_worker_task);
 
 	kthread_init_work(&itv->irq_work, ivtv_irq_work_handler);
 
diff --git a/drivers/mmc/core/sdio_irq.c b/drivers/mmc/core/sdio_irq.c
@@ -139,11 +139,10 @@ EXPORT_SYMBOL_GPL(sdio_signal_irq);
 static int sdio_irq_thread(void *_host)
 {
 	struct mmc_host *host = _host;
-	struct sched_param param = { .sched_priority = 1 };
 	unsigned long period, idle_period;
 	int ret;
 
-	sched_setscheduler(current, SCHED_FIFO, &param);
+	sched_set_fifo_low(current);
 
 	/*
 	 * We want to allow for SDIO cards to work even on non SDIO
diff --git a/drivers/platform/chrome/cros_ec_spi.c b/drivers/platform/chrome/cros_ec_spi.c
@@ -709,9 +709,6 @@ static void cros_ec_spi_high_pri_release(void *worker)
 static int cros_ec_spi_devm_high_pri_alloc(struct device *dev,
 					   struct cros_ec_spi *ec_spi)
 {
-	struct sched_param sched_priority = {
-		.sched_priority = MAX_RT_PRIO / 2,
-	};
 	int err;
 
 	ec_spi->high_pri_worker =
@@ -728,11 +725,9 @@ static int cros_ec_spi_devm_high_pri_alloc(struct device *dev,
 	if (err)
 		return err;
 
-	err = sched_setscheduler_nocheck(ec_spi->high_pri_worker->task,
-					 SCHED_FIFO, &sched_priority);
-	if (err)
-		dev_err(dev, "Can't set cros_ec high pri priority: %d\n", err);
-	return err;
+	sched_set_fifo(ec_spi->high_pri_worker->task);
+
+	return 0;
 }
 
 static int cros_ec_spi_probe(struct spi_device *spi)
diff --git a/drivers/powercap/idle_inject.c b/drivers/powercap/idle_inject.c
@@ -268,9 +268,7 @@ void idle_inject_stop(struct idle_inject_device *ii_dev)
  */
 static void idle_inject_setup(unsigned int cpu)
 {
-	struct sched_param param = { .sched_priority = MAX_USER_RT_PRIO / 2 };
-
-	sched_setscheduler(current, SCHED_FIFO, &param);
+	sched_set_fifo(current);
 }
 
 /**
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
@@ -1626,11 +1626,9 @@ EXPORT_SYMBOL_GPL(spi_take_timestamp_post);
  */
 static void spi_set_thread_rt(struct spi_controller *ctlr)
 {
-	struct sched_param param = { .sched_priority = MAX_RT_PRIO / 2 };
-
 	dev_info(&ctlr->dev,
 		"will run message pump with realtime priority\n");
-	sched_setscheduler(ctlr->kworker->task, SCHED_FIFO, &param);
+	sched_set_fifo(ctlr->kworker->task);
 }
 
 static int spi_init_queue(struct spi_controller *ctlr)
diff --git a/drivers/staging/android/ion/ion_heap.c b/drivers/staging/android/ion/ion_heap.c
@@ -244,8 +244,6 @@ static int ion_heap_deferred_free(void *data)
 
 int ion_heap_init_deferred_free(struct ion_heap *heap)
 {
-	struct sched_param param = { .sched_priority = 0 };
-
 	INIT_LIST_HEAD(&heap->free_list);
 	init_waitqueue_head(&heap->waitqueue);
 	heap->task = kthread_run(ion_heap_deferred_free, heap,
@@ -255,7 +253,7 @@ int ion_heap_init_deferred_free(struct ion_heap *heap)
 		       __func__);
 		return PTR_ERR_OR_ZERO(heap->task);
 	}
-	sched_setscheduler(heap->task, SCHED_IDLE, &param);
+	sched_set_normal(heap->task, 19);
 
 	return 0;
 }
diff --git a/drivers/thermal/intel/intel_powerclamp.c b/drivers/thermal/intel/intel_powerclamp.c
@@ -70,9 +70,6 @@ static unsigned int control_cpu; /* The cpu assigned to collect stat and update
  */
 static bool clamping;
 
-static const struct sched_param sparam = {
-	.sched_priority = MAX_USER_RT_PRIO / 2,
-};
 struct powerclamp_worker_data {
 	struct kthread_worker *worker;
 	struct kthread_work balancing_work;
@@ -488,7 +485,7 @@ static void start_power_clamp_worker(unsigned long cpu)
 	w_data->cpu = cpu;
 	w_data->clamping = true;
 	set_bit(cpu, cpu_clamping_mask);
-	sched_setscheduler(worker->task, SCHED_FIFO, &sparam);
+	sched_set_fifo(worker->task);
 	kthread_init_work(&w_data->balancing_work, clamp_balancing_func);
 	kthread_init_delayed_work(&w_data->idle_injection_work,
 				  clamp_idle_injection_func);
diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
@@ -1179,7 +1179,6 @@ static int sc16is7xx_probe(struct device *dev,
 			   const struct sc16is7xx_devtype *devtype,
 			   struct regmap *regmap, int irq)
 {
-	struct sched_param sched_param = { .sched_priority = MAX_RT_PRIO / 2 };
 	unsigned long freq = 0, *pfreq = dev_get_platdata(dev);
 	unsigned int val;
 	u32 uartclk = 0;
@@ -1239,7 +1238,7 @@ static int sc16is7xx_probe(struct device *dev,
 		ret = PTR_ERR(s->kworker_task);
 		goto out_clk;
 	}
-	sched_setscheduler(s->kworker_task, SCHED_FIFO, &sched_param);
+	sched_set_fifo(s->kworker_task);
 
 #ifdef CONFIG_GPIOLIB
 	if (devtype->nr_gpio) {
diff --git a/drivers/watchdog/watchdog_dev.c b/drivers/watchdog/watchdog_dev.c
@@ -1144,14 +1144,13 @@ void watchdog_dev_unregister(struct watchdog_device *wdd)
 int __init watchdog_dev_init(void)
 {
 	int err;
-	struct sched_param param = {.sched_priority = MAX_RT_PRIO - 1,};
 
 	watchdog_kworker = kthread_create_worker(0, "watchdogd");
 	if (IS_ERR(watchdog_kworker)) {
 		pr_err("Failed to create watchdog kworker\n");
 		return PTR_ERR(watchdog_kworker);
 	}
-	sched_setscheduler(watchdog_kworker->task, SCHED_FIFO, &param);
+	sched_set_fifo(watchdog_kworker->task);
 
 	err = class_register(&watchdog_class);
 	if (err < 0) {
diff --git a/include/linux/sched.h b/include/linux/sched.h
@@ -1648,6 +1648,9 @@ extern int idle_cpu(int cpu);
 extern int available_idle_cpu(int cpu);
 extern int sched_setscheduler(struct task_struct *, int, const struct sched_param *);
 extern int sched_setscheduler_nocheck(struct task_struct *, int, const struct sched_param *);
+extern void sched_set_fifo(struct task_struct *p);
+extern void sched_set_fifo_low(struct task_struct *p);
+extern void sched_set_normal(struct task_struct *p, int nice);
 extern int sched_setattr(struct task_struct *, const struct sched_attr *);
 extern int sched_setattr_nocheck(struct task_struct *, const struct sched_attr *);
 extern struct task_struct *idle_task(int cpu);
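The driver conversions above and below follow one of two patterns: a thread setting its own policy via sched_set_fifo*(current), or a driver setting the policy of a kthread worker it just created. A hedged sketch of the worker pattern, with hypothetical demo_* names:

	#include <linux/err.h>
	#include <linux/kthread.h>
	#include <linux/sched.h>

	/* Hypothetical probe-time helper; the sched_set_fifo() call is the point. */
	static int demo_start_rt_worker(struct kthread_worker **out)
	{
		struct kthread_worker *worker;

		worker = kthread_create_worker(0, "demo_rt_worker");
		if (IS_ERR(worker))
			return PTR_ERR(worker);

		/*
		 * Replaces the old open-coded form:
		 *	struct sched_param param = { .sched_priority = MAX_RT_PRIO / 2 };
		 *	sched_setscheduler(worker->task, SCHED_FIFO, &param);
		 */
		sched_set_fifo(worker->task);

		*out = worker;
		return 0;
	}

Note that the new helpers return void; per the 'sched: Remove sched_set_*() return value' commit in this series, a failure trips WARN_ON_ONCE() rather than being propagated to the caller.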
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
@@ -1308,9 +1308,6 @@ static int
 setup_irq_thread(struct irqaction *new, unsigned int irq, bool secondary)
 {
 	struct task_struct *t;
-	struct sched_param param = {
-		.sched_priority = MAX_USER_RT_PRIO/2,
-	};
 
 	if (!secondary) {
 		t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
@@ -1318,13 +1315,12 @@ setup_irq_thread(struct irqaction *new, unsigned int irq, bool secondary)
 	} else {
 		t = kthread_create(irq_thread, new, "irq/%d-s-%s", irq,
 				   new->name);
-		param.sched_priority -= 1;
 	}
 
 	if (IS_ERR(t))
 		return PTR_ERR(t);
 
-	sched_setscheduler_nocheck(t, SCHED_FIFO, &param);
+	sched_set_fifo(t);
 
 	/*
 	 * We keep the reference to the task struct even if
diff --git a/kernel/locking/locktorture.c b/kernel/locking/locktorture.c
@@ -436,8 +436,6 @@ static int torture_rtmutex_lock(void) __acquires(torture_rtmutex)
 
 static void torture_rtmutex_boost(struct torture_random_state *trsp)
 {
-	int policy;
-	struct sched_param param;
 	const unsigned int factor = 50000; /* yes, quite arbitrary */
 
 	if (!rt_task(current)) {
@@ -448,8 +446,7 @@ static void torture_rtmutex_boost(struct torture_random_state *trsp)
 		 */
 		if (trsp && !(torture_random(trsp) %
 			      (cxt.nrealwriters_stress * factor))) {
-			policy = SCHED_FIFO;
-			param.sched_priority = MAX_RT_PRIO - 1;
+			sched_set_fifo(current);
 		} else /* common case, do nothing */
 			return;
 	} else {
@@ -462,13 +459,10 @@ static void torture_rtmutex_boost(struct torture_random_state *trsp)
 		 */
 		if (!trsp || !(torture_random(trsp) %
 			       (cxt.nrealwriters_stress * factor * 2))) {
-			policy = SCHED_NORMAL;
-			param.sched_priority = 0;
+			sched_set_normal(current, 0);
 		} else /* common case, do nothing */
 			return;
 	}
-
-	sched_setscheduler_nocheck(current, policy, &param);
 }
 
 static void torture_rtmutex_delay(struct torture_random_state *trsp)
diff --git a/kernel/rcu/rcuperf.c b/kernel/rcu/rcuperf.c
@@ -361,7 +361,6 @@ rcu_perf_writer(void *arg)
 	int i_max;
 	long me = (long)arg;
 	struct rcu_head *rhp = NULL;
-	struct sched_param sp;
 	bool started = false, done = false, alldone = false;
 	u64 t;
 	u64 *wdp;
@@ -370,8 +369,7 @@ rcu_perf_writer(void *arg)
 	VERBOSE_PERFOUT_STRING("rcu_perf_writer task started");
 	WARN_ON(!wdpp);
 	set_cpus_allowed_ptr(current, cpumask_of(me % nr_cpu_ids));
-	sp.sched_priority = 1;
-	sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
+	sched_set_fifo_low(current);
 
 	if (holdoff)
 		schedule_timeout_uninterruptible(holdoff * HZ);
@@ -427,9 +425,7 @@ rcu_perf_writer(void *arg)
 			started = true;
 		if (!done && i >= MIN_MEAS) {
 			done = true;
-			sp.sched_priority = 0;
-			sched_setscheduler_nocheck(current,
-						   SCHED_NORMAL, &sp);
+			sched_set_normal(current, 0);
 			pr_alert("%s%s rcu_perf_writer %ld has %d measurements\n",
 				 perf_type, PERF_FLAG, me, MIN_MEAS);
 			if (atomic_inc_return(&n_rcu_perf_writer_finished) >=
diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
@@ -895,16 +895,11 @@ static int rcu_torture_boost(void *arg)
 	unsigned long endtime;
 	unsigned long oldstarttime;
 	struct rcu_boost_inflight rbi = { .inflight = 0 };
-	struct sched_param sp;
 
 	VERBOSE_TOROUT_STRING("rcu_torture_boost started");
 
 	/* Set real-time priority. */
-	sp.sched_priority = 1;
-	if (sched_setscheduler(current, SCHED_FIFO, &sp) < 0) {
-		VERBOSE_TOROUT_STRING("rcu_torture_boost RT prio failed!");
-		n_rcu_torture_boost_rterror++;
-	}
+	sched_set_fifo_low(current);
 
 	init_rcu_head_on_stack(&rbi.rcu);
 	/* Each pass through the following loop does one boost-test cycle. */
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
@@ -5496,6 +5496,8 @@ static int _sched_setscheduler(struct task_struct *p, int policy,
  * @policy: new policy.
  * @param: structure containing the new RT priority.
  *
+ * Use sched_set_fifo(), read its comment.
+ *
  * Return: 0 on success. An error code otherwise.
  *
  * NOTE that the task may be already dead.
@@ -5505,13 +5507,11 @@ int sched_setscheduler(struct task_struct *p, int policy,
 {
 	return _sched_setscheduler(p, policy, param, true);
 }
-EXPORT_SYMBOL_GPL(sched_setscheduler);
 
 int sched_setattr(struct task_struct *p, const struct sched_attr *attr)
 {
 	return __sched_setscheduler(p, attr, true, true);
 }
-EXPORT_SYMBOL_GPL(sched_setattr);
 
 int sched_setattr_nocheck(struct task_struct *p, const struct sched_attr *attr)
 {
@@ -5536,7 +5536,51 @@ int sched_setscheduler_nocheck(struct task_struct *p, int policy,
 {
 	return _sched_setscheduler(p, policy, param, false);
 }
-EXPORT_SYMBOL_GPL(sched_setscheduler_nocheck);
 
+/*
+ * SCHED_FIFO is a broken scheduler model; that is, it is fundamentally
+ * incapable of resource management, which is the one thing an OS really should
+ * be doing.
+ *
+ * This is of course the reason it is limited to privileged users only.
+ *
+ * Worse still; it is fundamentally impossible to compose static priority
+ * workloads. You cannot take two correctly working static prio workloads
+ * and smash them together and still expect them to work.
+ *
+ * For this reason 'all' FIFO tasks the kernel creates are basically at:
+ *
+ *   MAX_RT_PRIO / 2
+ *
+ * The administrator _MUST_ configure the system, the kernel simply doesn't
+ * know enough information to make a sensible choice.
+ */
+void sched_set_fifo(struct task_struct *p)
+{
+	struct sched_param sp = { .sched_priority = MAX_RT_PRIO / 2 };
+	WARN_ON_ONCE(sched_setscheduler_nocheck(p, SCHED_FIFO, &sp) != 0);
+}
+EXPORT_SYMBOL_GPL(sched_set_fifo);
+
+/*
+ * For when you don't much care about FIFO, but want to be above SCHED_NORMAL.
+ */
+void sched_set_fifo_low(struct task_struct *p)
+{
+	struct sched_param sp = { .sched_priority = 1 };
+	WARN_ON_ONCE(sched_setscheduler_nocheck(p, SCHED_FIFO, &sp) != 0);
+}
+EXPORT_SYMBOL_GPL(sched_set_fifo_low);
+
+void sched_set_normal(struct task_struct *p, int nice)
+{
+	struct sched_attr attr = {
+		.sched_policy = SCHED_NORMAL,
+		.sched_nice = nice,
+	};
+	WARN_ON_ONCE(sched_setattr_nocheck(p, &attr) != 0);
+}
+EXPORT_SYMBOL_GPL(sched_set_normal);
+
 static int
 do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
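For reference, assuming the mainline definition MAX_RT_PRIO == 100, the levels the new helpers select work out to:

 - sched_set_fifo(p): SCHED_FIFO, static priority MAX_RT_PRIO / 2 = 50
 - sched_set_fifo_low(p): SCHED_FIFO, static priority 1
 - sched_set_normal(p, nice): SCHED_NORMAL with the given nice value

These numbers describe the implementation above, not a contract; callers should treat the levels as opaque.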
diff --git a/kernel/sched/psi.c b/kernel/sched/psi.c
@@ -616,11 +616,8 @@ static void psi_poll_work(struct psi_group *group)
 static int psi_poll_worker(void *data)
 {
 	struct psi_group *group = (struct psi_group *)data;
-	struct sched_param param = {
-		.sched_priority = 1,
-	};
 
-	sched_setscheduler_nocheck(current, SCHED_FIFO, &param);
+	sched_set_fifo_low(current);
 
 	while (true) {
 		wait_event_interruptible(group->poll_wait,
diff --git a/kernel/trace/ring_buffer_benchmark.c b/kernel/trace/ring_buffer_benchmark.c
@@ -45,8 +45,8 @@ MODULE_PARM_DESC(write_iteration, "# of writes between timestamp readings");
 static int producer_nice = MAX_NICE;
 static int consumer_nice = MAX_NICE;
 
-static int producer_fifo = -1;
-static int consumer_fifo = -1;
+static int producer_fifo;
+static int consumer_fifo;
 
 module_param(producer_nice, int, 0644);
 MODULE_PARM_DESC(producer_nice, "nice prio for producer");
@@ -55,10 +55,10 @@ module_param(consumer_nice, int, 0644);
 MODULE_PARM_DESC(consumer_nice, "nice prio for consumer");
 
 module_param(producer_fifo, int, 0644);
-MODULE_PARM_DESC(producer_fifo, "fifo prio for producer");
+MODULE_PARM_DESC(producer_fifo, "use fifo for producer: 0 - disabled, 1 - low prio, 2 - fifo");
 
 module_param(consumer_fifo, int, 0644);
-MODULE_PARM_DESC(consumer_fifo, "fifo prio for consumer");
+MODULE_PARM_DESC(consumer_fifo, "use fifo for consumer: 0 - disabled, 1 - low prio, 2 - fifo");
 
 static int read_events;
 
@@ -303,22 +303,22 @@ static void ring_buffer_producer(void)
 		trace_printk("ERROR!\n");
 
 	if (!disable_reader) {
-		if (consumer_fifo < 0)
+		if (consumer_fifo)
+			trace_printk("Running Consumer at SCHED_FIFO %s\n",
+				     consumer_fifo == 1 ? "low" : "high");
+		else
 			trace_printk("Running Consumer at nice: %d\n",
 				     consumer_nice);
-		else
-			trace_printk("Running Consumer at SCHED_FIFO %d\n",
-				     consumer_fifo);
 	}
-	if (producer_fifo < 0)
+	if (producer_fifo)
+		trace_printk("Running Producer at SCHED_FIFO %s\n",
+			     producer_fifo == 1 ? "low" : "high");
+	else
 		trace_printk("Running Producer at nice: %d\n",
 			     producer_nice);
-	else
-		trace_printk("Running Producer at SCHED_FIFO %d\n",
-			     producer_fifo);
 
 	/* Let the user know that the test is running at low priority */
-	if (producer_fifo < 0 && consumer_fifo < 0 &&
+	if (!producer_fifo && !consumer_fifo &&
 	    producer_nice == MAX_NICE && consumer_nice == MAX_NICE)
 		trace_printk("WARNING!!! This test is running at lowest priority.\n");
 
@@ -455,21 +455,19 @@ static int __init ring_buffer_benchmark_init(void)
 	 * Run them as low-prio background tasks by default:
 	 */
 	if (!disable_reader) {
-		if (consumer_fifo >= 0) {
-			struct sched_param param = {
-				.sched_priority = consumer_fifo
-			};
-			sched_setscheduler(consumer, SCHED_FIFO, &param);
-		} else
+		if (consumer_fifo >= 2)
+			sched_set_fifo(consumer);
+		else if (consumer_fifo == 1)
+			sched_set_fifo_low(consumer);
+		else
 			set_user_nice(consumer, consumer_nice);
 	}
 
-	if (producer_fifo >= 0) {
-		struct sched_param param = {
-			.sched_priority = producer_fifo
-		};
-		sched_setscheduler(producer, SCHED_FIFO, &param);
-	} else
+	if (producer_fifo >= 2)
+		sched_set_fifo(producer);
+	else if (producer_fifo == 1)
+		sched_set_fifo_low(producer);
+	else
 		set_user_nice(producer, producer_nice);
 
 	return 0;