#include <plat/mailbox.h>
-static struct workqueue_struct *mboxd;
static struct omap_mbox **mboxes;
static int mbox_configured;
/* no more messages in the fifo. clear IRQ source. */
ack_mbox_irq(mbox, IRQ_RX);
nomem:
- queue_work(mboxd, &mbox->rxq->work);
+ schedule_work(&mbox->rxq->work);
}
static irqreturn_t mbox_interrupt(int irq, void *p)
if (!--mbox->use_count) {
free_irq(mbox->irq, mbox);
tasklet_kill(&mbox->txq->tasklet);
- flush_work(&mbox->rxq->work);
+ flush_work_sync(&mbox->rxq->work);
mbox_queue_free(mbox->txq);
mbox_queue_free(mbox->rxq);
}
if (err)
return err;
- mboxd = create_workqueue("mboxd");
- if (!mboxd)
- return -ENOMEM;
-
/* kfifo size sanity check: alignment and minimal size */
mbox_kfifo_size = ALIGN(mbox_kfifo_size, sizeof(mbox_msg_t));
mbox_kfifo_size = max_t(unsigned int, mbox_kfifo_size,
static void __exit omap_mbox_exit(void)
{
- destroy_workqueue(mboxd);
class_unregister(&omap_mbox_class);
}
module_exit(omap_mbox_exit);
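/*
 * Illustrative sketch, not part of the patch: the conversion pattern this
 * series applies.  A driver with only a couple of work items rarely needs
 * its own workqueue; it can queue onto the shared system workqueue and
 * flush the individual items on teardown.  All foo_* names are
 * hypothetical.
 */
#include <linux/workqueue.h>

struct foo_dev {
	struct work_struct irq_work;		/* was queued on a private wq */
};

static void foo_irq_workfn(struct work_struct *work)
{
	struct foo_dev *fd = container_of(work, struct foo_dev, irq_work);
	/* process the device pointed to by fd ... */
}

static void foo_init(struct foo_dev *fd)
{
	INIT_WORK(&fd->irq_work, foo_irq_workfn);	/* no create_workqueue() */
}

static void foo_irq(struct foo_dev *fd)
{
	schedule_work(&fd->irq_work);	/* replaces queue_work(priv_wq, ...) */
}

static void foo_remove(struct foo_dev *fd)
{
	/* wait on the specific work item instead of destroy_workqueue() */
	flush_work_sync(&fd->irq_work);
}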
acpi_status __init acpi_os_initialize1(void)
{
- kacpid_wq = create_workqueue("kacpid");
- kacpi_notify_wq = create_workqueue("kacpi_notify");
- kacpi_hotplug_wq = create_workqueue("kacpi_hotplug");
+ kacpid_wq = alloc_workqueue("kacpid", 0, 1);
+ kacpi_notify_wq = alloc_workqueue("kacpi_notify", 0, 1);
+ kacpi_hotplug_wq = alloc_workqueue("kacpi_hotplug", 0, 1);
BUG_ON(!kacpid_wq);
BUG_ON(!kacpi_notify_wq);
BUG_ON(!kacpi_hotplug_wq);
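/*
 * Illustrative sketch, not part of the patch: alloc_workqueue() replaces
 * create_workqueue().  The second argument is a flag mask, the third is
 * max_active: how many work items the queue may have in flight per CPU
 * (0 selects the default).  The ACPI queues above keep their identity but
 * are capped at one in-flight work item each, matching the old
 * single-threaded behaviour their callers relied on.  example_wq is
 * hypothetical.
 */
#include <linux/workqueue.h>

static struct workqueue_struct *example_wq;

static int __init example_init(void)
{
	example_wq = alloc_workqueue("example", 0, 1);
	if (!example_wq)
		return -ENOMEM;
	return 0;
}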
*/
static DEFINE_MUTEX(dbs_mutex);
-static struct workqueue_struct *kconservative_wq;
-
static struct dbs_tuners {
unsigned int sampling_rate;
unsigned int sampling_down_factor;
dbs_check_cpu(dbs_info);
- queue_delayed_work_on(cpu, kconservative_wq, &dbs_info->work, delay);
+ schedule_delayed_work_on(cpu, &dbs_info->work, delay);
mutex_unlock(&dbs_info->timer_mutex);
}
dbs_info->enable = 1;
INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer);
- queue_delayed_work_on(dbs_info->cpu, kconservative_wq, &dbs_info->work,
- delay);
+ schedule_delayed_work_on(dbs_info->cpu, &dbs_info->work, delay);
}
static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
static int __init cpufreq_gov_dbs_init(void)
{
- int err;
-
- kconservative_wq = create_workqueue("kconservative");
- if (!kconservative_wq) {
- printk(KERN_ERR "Creation of kconservative failed\n");
- return -EFAULT;
- }
-
- err = cpufreq_register_governor(&cpufreq_gov_conservative);
- if (err)
- destroy_workqueue(kconservative_wq);
-
- return err;
+ return cpufreq_register_governor(&cpufreq_gov_conservative);
}
static void __exit cpufreq_gov_dbs_exit(void)
{
cpufreq_unregister_governor(&cpufreq_gov_conservative);
- destroy_workqueue(kconservative_wq);
}
*/
static DEFINE_MUTEX(dbs_mutex);
-static struct workqueue_struct *kondemand_wq;
-
static struct dbs_tuners {
unsigned int sampling_rate;
unsigned int up_threshold;
__cpufreq_driver_target(dbs_info->cur_policy,
dbs_info->freq_lo, CPUFREQ_RELATION_H);
}
- queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work, delay);
+ schedule_delayed_work_on(cpu, &dbs_info->work, delay);
mutex_unlock(&dbs_info->timer_mutex);
}
dbs_info->sample_type = DBS_NORMAL_SAMPLE;
INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer);
- queue_delayed_work_on(dbs_info->cpu, kondemand_wq, &dbs_info->work,
- delay);
+ schedule_delayed_work_on(dbs_info->cpu, &dbs_info->work, delay);
}
static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
static int __init cpufreq_gov_dbs_init(void)
{
- int err;
cputime64_t wall;
u64 idle_time;
int cpu = get_cpu();
MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10);
}
- kondemand_wq = create_workqueue("kondemand");
- if (!kondemand_wq) {
- printk(KERN_ERR "Creation of kondemand failed\n");
- return -EFAULT;
- }
- err = cpufreq_register_governor(&cpufreq_gov_ondemand);
- if (err)
- destroy_workqueue(kondemand_wq);
-
- return err;
+ return cpufreq_register_governor(&cpufreq_gov_ondemand);
}
static void __exit cpufreq_gov_dbs_exit(void)
{
cpufreq_unregister_governor(&cpufreq_gov_ondemand);
- destroy_workqueue(kondemand_wq);
}
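/*
 * Illustrative sketch, not part of the patch: the per-CPU polling pattern
 * the two governors above now use.  schedule_delayed_work_on() queues onto
 * the system workqueue but pins execution to @cpu, so no private
 * "kondemand"/"kconservative" queue is needed.  Names are hypothetical.
 */
#include <linux/workqueue.h>

struct poll_state {
	struct delayed_work work;
	int cpu;
};

static void poll_workfn(struct work_struct *work)
{
	struct poll_state *ps = container_of(to_delayed_work(work),
					     struct poll_state, work);
	/* sample ps->cpu, then rearm for the next period */
	schedule_delayed_work_on(ps->cpu, &ps->work, HZ);
}

static void poll_start(struct poll_state *ps, int cpu)
{
	ps->cpu = cpu;
	INIT_DELAYED_WORK_DEFERRABLE(&ps->work, poll_workfn);
	schedule_delayed_work_on(cpu, &ps->work, HZ);
}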
struct input_dev *input_dev;
struct device *dev;
char phys[32];
- struct workqueue_struct *wq;
struct delayed_work work;
unsigned polling; /* polling is active */
struct ts_event tc;
poll = 1;
if (poll) {
- schd = queue_delayed_work(tsc->wq, &tsc->work,
- msecs_to_jiffies(tsc->poll_period));
+ schd = schedule_delayed_work(&tsc->work,
+ msecs_to_jiffies(tsc->poll_period));
if (schd)
tsc->polling = 1;
else {
tsc->input_dev = input_dev;
INIT_DELAYED_WORK(&tsc->work, tps6507x_ts_handler);
- tsc->wq = create_workqueue("TPS6507x Touchscreen");
if (init_data) {
tsc->poll_period = init_data->poll_period;
if (error)
goto err2;
- schd = queue_delayed_work(tsc->wq, &tsc->work,
- msecs_to_jiffies(tsc->poll_period));
+ schd = schedule_delayed_work(&tsc->work,
+ msecs_to_jiffies(tsc->poll_period));
if (schd)
tsc->polling = 1;
err2:
cancel_delayed_work_sync(&tsc->work);
- destroy_workqueue(tsc->wq);
input_free_device(input_dev);
err1:
kfree(tsc);
struct input_dev *input_dev = tsc->input_dev;
cancel_delayed_work_sync(&tsc->work);
- destroy_workqueue(tsc->wq);
input_unregister_device(input_dev);
{
int ret = -ENOMEM;
- md_wq = alloc_workqueue("md", WQ_RESCUER, 0);
+ md_wq = alloc_workqueue("md", WQ_MEM_RECLAIM, 0);
if (!md_wq)
goto err_wq;
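/*
 * Illustrative sketch, not part of the patch: WQ_MEM_RECLAIM is the new
 * name for WQ_RESCUER.  It guarantees the queue a rescuer thread, so its
 * work items can make forward progress even when memory is too tight to
 * fork a new worker -- mandatory for any queue, like md's, that sits in
 * the writeback/reclaim path.  reclaim_wq is hypothetical.
 */
#include <linux/workqueue.h>

static struct workqueue_struct *reclaim_wq;

static int __init reclaim_example_init(void)
{
	reclaim_wq = alloc_workqueue("reclaim_example", WQ_MEM_RECLAIM, 0);
	return reclaim_wq ? 0 : -ENOMEM;
}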
osm_debug("Register driver %s\n", drv->name);
if (drv->event) {
- drv->event_queue = create_workqueue(drv->name);
+ drv->event_queue = alloc_workqueue(drv->name,
+ WQ_MEM_RECLAIM, 1);
if (!drv->event_queue) {
osm_err("Could not initialize event queue for driver "
"%s\n", drv->name);
u32 barker;
struct iwmct_dbg dbg;
- /* drivers work queue */
- struct workqueue_struct *wq;
- struct workqueue_struct *bus_rescan_wq;
+ /* driver's work items */
struct work_struct bus_rescan_worker;
struct work_struct isr_worker;
switch (msg->hdr.opcode) {
case OP_OPR_ALIVE:
LOG_INFO(priv, FW_MSG, "Got ALIVE from device, wake rescan\n");
- queue_work(priv->bus_rescan_wq, &priv->bus_rescan_worker);
+ schedule_work(&priv->bus_rescan_worker);
break;
default:
LOG_INFO(priv, FW_MSG, "Received msg opcode 0x%X\n",
/* clear the function's interrupt request bit (write 1 to clear) */
sdio_writeb(func, 1, IWMC_SDIO_INTR_CLEAR_ADDR, &ret);
- queue_work(priv->wq, &priv->isr_worker);
+ schedule_work(&priv->isr_worker);
LOG_TRACE(priv, IRQ, "exit iwmct_irq\n");
priv->func = func;
sdio_set_drvdata(func, priv);
-
- /* create drivers work queue */
- priv->wq = create_workqueue(DRV_NAME "_wq");
- priv->bus_rescan_wq = create_workqueue(DRV_NAME "_rescan_wq");
INIT_WORK(&priv->bus_rescan_worker, iwmct_rescan_worker);
INIT_WORK(&priv->isr_worker, iwmct_irq_read_worker);
sdio_release_irq(func);
sdio_release_host(func);
- /* Safely destroy osc workqueue */
- destroy_workqueue(priv->bus_rescan_wq);
- destroy_workqueue(priv->wq);
+ /* Make sure the work items have finished */
+ flush_work_sync(&priv->bus_rescan_worker);
+ flush_work_sync(&priv->isr_worker);
sdio_claim_host(func);
sdio_disable_func(func);
netif_stop_queue(priv->net_dev);
priv->status |= STATUS_RESET_PENDING;
if (priv->reset_backoff)
- queue_delayed_work(priv->workqueue, &priv->reset_work,
- priv->reset_backoff * HZ);
+ schedule_delayed_work(&priv->reset_work,
+ priv->reset_backoff * HZ);
else
- queue_delayed_work(priv->workqueue, &priv->reset_work,
- 0);
+ schedule_delayed_work(&priv->reset_work, 0);
if (priv->reset_backoff < MAX_RESET_BACKOFF)
priv->reset_backoff++;
if (priv->stop_hang_check) {
priv->stop_hang_check = 0;
- queue_delayed_work(priv->workqueue, &priv->hang_check, HZ / 2);
+ schedule_delayed_work(&priv->hang_check, HZ / 2);
}
fail_up:
if (priv->stop_rf_kill) {
priv->stop_rf_kill = 0;
- queue_delayed_work(priv->workqueue, &priv->rf_kill,
- round_jiffies_relative(HZ));
+ schedule_delayed_work(&priv->rf_kill,
+ round_jiffies_relative(HZ));
}
deferred = 1;
priv->status |= STATUS_ASSOCIATING;
priv->connect_start = get_seconds();
- queue_delayed_work(priv->workqueue, &priv->wx_event_work, HZ / 10);
+ schedule_delayed_work(&priv->wx_event_work, HZ / 10);
}
static int ipw2100_set_essid(struct ipw2100_priv *priv, char *essid,
return;
if (priv->status & STATUS_SECURITY_UPDATED)
- queue_delayed_work(priv->workqueue, &priv->security_work, 0);
+ schedule_delayed_work(&priv->security_work, 0);
- queue_delayed_work(priv->workqueue, &priv->wx_event_work, 0);
+ schedule_delayed_work(&priv->wx_event_work, 0);
}
static void isr_indicate_rf_kill(struct ipw2100_priv *priv, u32 status)
/* Make sure the RF Kill check timer is running */
priv->stop_rf_kill = 0;
cancel_delayed_work(&priv->rf_kill);
- queue_delayed_work(priv->workqueue, &priv->rf_kill,
- round_jiffies_relative(HZ));
+ schedule_delayed_work(&priv->rf_kill, round_jiffies_relative(HZ));
}
static void send_scan_event(void *data)
/* Only userspace-requested scan completion events go out immediately */
if (!priv->user_requested_scan) {
if (!delayed_work_pending(&priv->scan_event_later))
- queue_delayed_work(priv->workqueue,
- &priv->scan_event_later,
- round_jiffies_relative(msecs_to_jiffies(4000)));
+ schedule_delayed_work(&priv->scan_event_later,
+ round_jiffies_relative(msecs_to_jiffies(4000)));
} else {
priv->user_requested_scan = 0;
cancel_delayed_work(&priv->scan_event_later);
- queue_work(priv->workqueue, &priv->scan_event_now);
+ schedule_work(&priv->scan_event_now);
}
}
/* Make sure the RF_KILL check timer is running */
priv->stop_rf_kill = 0;
cancel_delayed_work(&priv->rf_kill);
- queue_delayed_work(priv->workqueue, &priv->rf_kill,
- round_jiffies_relative(HZ));
+ schedule_delayed_work(&priv->rf_kill,
+ round_jiffies_relative(HZ));
} else
schedule_reset(priv);
}
IPW_DEBUG_INFO("exit\n");
}
-static void ipw2100_kill_workqueue(struct ipw2100_priv *priv)
+static void ipw2100_kill_works(struct ipw2100_priv *priv)
{
- if (priv->workqueue) {
- priv->stop_rf_kill = 1;
- priv->stop_hang_check = 1;
- cancel_delayed_work(&priv->reset_work);
- cancel_delayed_work(&priv->security_work);
- cancel_delayed_work(&priv->wx_event_work);
- cancel_delayed_work(&priv->hang_check);
- cancel_delayed_work(&priv->rf_kill);
- cancel_delayed_work(&priv->scan_event_later);
- destroy_workqueue(priv->workqueue);
- priv->workqueue = NULL;
- }
+ priv->stop_rf_kill = 1;
+ priv->stop_hang_check = 1;
+ cancel_delayed_work_sync(&priv->reset_work);
+ cancel_delayed_work_sync(&priv->security_work);
+ cancel_delayed_work_sync(&priv->wx_event_work);
+ cancel_delayed_work_sync(&priv->hang_check);
+ cancel_delayed_work_sync(&priv->rf_kill);
+ cancel_work_sync(&priv->scan_event_now);
+ cancel_delayed_work_sync(&priv->scan_event_later);
}
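/*
 * Illustrative sketch, not part of the patch: with the private workqueue
 * gone, destroy_workqueue() no longer acts as a teardown barrier, so
 * ipw2100_kill_works() switches to the _sync cancel variants.
 */
#include <linux/workqueue.h>

static void stop_example(struct delayed_work *dwork)
{
	/*
	 * cancel_delayed_work() only removes an item whose timer is still
	 * pending; cancel_delayed_work_sync() additionally waits for an
	 * already-running instance to finish before returning.
	 */
	cancel_delayed_work_sync(dwork);
}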
static int ipw2100_tx_allocate(struct ipw2100_priv *priv)
priv->last_rtc = rtc;
if (!priv->stop_hang_check)
- queue_delayed_work(priv->workqueue, &priv->hang_check, HZ / 2);
+ schedule_delayed_work(&priv->hang_check, HZ / 2);
spin_unlock_irqrestore(&priv->low_lock, flags);
}
if (rf_kill_active(priv)) {
IPW_DEBUG_RF_KILL("RF Kill active, rescheduling GPIO check\n");
if (!priv->stop_rf_kill)
- queue_delayed_work(priv->workqueue, &priv->rf_kill,
- round_jiffies_relative(HZ));
+ schedule_delayed_work(&priv->rf_kill,
+ round_jiffies_relative(HZ));
goto exit_unlock;
}
INIT_LIST_HEAD(&priv->fw_pend_list);
INIT_STAT(&priv->fw_pend_stat);
- priv->workqueue = create_workqueue(DRV_NAME);
-
INIT_DELAYED_WORK(&priv->reset_work, ipw2100_reset_adapter);
INIT_DELAYED_WORK(&priv->security_work, ipw2100_security_work);
INIT_DELAYED_WORK(&priv->wx_event_work, ipw2100_wx_event_work);
if (dev->irq)
free_irq(dev->irq, priv);
- ipw2100_kill_workqueue(priv);
+ ipw2100_kill_works(priv);
/* These are safe to call even if they weren't allocated */
ipw2100_queues_free(priv);
* first, then close() will crash. */
unregister_netdev(dev);
- /* ipw2100_down will ensure that there is no more pending work
- * in the workqueue's, so we can safely remove them now. */
- ipw2100_kill_workqueue(priv);
+ ipw2100_kill_works(priv);
ipw2100_queues_free(priv);
struct tasklet_struct irq_tasklet;
- struct workqueue_struct *workqueue;
struct delayed_work reset_work;
struct delayed_work security_work;
struct delayed_work wx_event_work;
/* If we aren't associated, schedule turning the LED off */
if (!(priv->status & STATUS_ASSOCIATED))
- queue_delayed_work(priv->workqueue,
- &priv->led_link_off,
- LD_TIME_LINK_ON);
+ schedule_delayed_work(&priv->led_link_off,
+ LD_TIME_LINK_ON);
}
spin_unlock_irqrestore(&priv->lock, flags);
* turning the LED on (blink while unassociated) */
if (!(priv->status & STATUS_RF_KILL_MASK) &&
!(priv->status & STATUS_ASSOCIATED))
- queue_delayed_work(priv->workqueue, &priv->led_link_on,
- LD_TIME_LINK_OFF);
+ schedule_delayed_work(&priv->led_link_on,
+ LD_TIME_LINK_OFF);
}
priv->status |= STATUS_LED_ACT_ON;
cancel_delayed_work(&priv->led_act_off);
- queue_delayed_work(priv->workqueue, &priv->led_act_off,
- LD_TIME_ACT_ON);
+ schedule_delayed_work(&priv->led_act_off, LD_TIME_ACT_ON);
} else {
/* Reschedule LED off for full time period */
cancel_delayed_work(&priv->led_act_off);
- queue_delayed_work(priv->workqueue, &priv->led_act_off,
- LD_TIME_ACT_ON);
+ schedule_delayed_work(&priv->led_act_off, LD_TIME_ACT_ON);
}
}
if (disable_radio) {
priv->status |= STATUS_RF_KILL_SW;
- if (priv->workqueue) {
- cancel_delayed_work(&priv->request_scan);
- cancel_delayed_work(&priv->request_direct_scan);
- cancel_delayed_work(&priv->request_passive_scan);
- cancel_delayed_work(&priv->scan_event);
- }
- queue_work(priv->workqueue, &priv->down);
+ cancel_delayed_work(&priv->request_scan);
+ cancel_delayed_work(&priv->request_direct_scan);
+ cancel_delayed_work(&priv->request_passive_scan);
+ cancel_delayed_work(&priv->scan_event);
+ schedule_work(&priv->down);
} else {
priv->status &= ~STATUS_RF_KILL_SW;
if (rf_kill_active(priv)) {
"disabled by HW switch\n");
/* Make sure the RF_KILL check timer is running */
cancel_delayed_work(&priv->rf_kill);
- queue_delayed_work(priv->workqueue, &priv->rf_kill,
- round_jiffies_relative(2 * HZ));
+ schedule_delayed_work(&priv->rf_kill,
+ round_jiffies_relative(2 * HZ));
} else
- queue_work(priv->workqueue, &priv->up);
+ schedule_work(&priv->up);
}
return 1;
cancel_delayed_work(&priv->request_passive_scan);
cancel_delayed_work(&priv->scan_event);
schedule_work(&priv->link_down);
- queue_delayed_work(priv->workqueue, &priv->rf_kill, 2 * HZ);
+ schedule_delayed_work(&priv->rf_kill, 2 * HZ);
handled |= IPW_INTA_BIT_RF_KILL_DONE;
}
priv->status &= ~STATUS_HCMD_ACTIVE;
wake_up_interruptible(&priv->wait_command_queue);
- queue_work(priv->workqueue, &priv->adapter_restart);
+ schedule_work(&priv->adapter_restart);
handled |= IPW_INTA_BIT_FATAL_ERROR;
}
return ipw_send_cmd_pdu(priv, IPW_CMD_ADAPTER_ADDRESS, ETH_ALEN, mac);
}
-/*
- * NOTE: This must be executed from our workqueue as it results in udelay
- * being called which may corrupt the keyboard if executed on default
- * workqueue
- */
static void ipw_adapter_restart(void *adapter)
{
struct ipw_priv *priv = adapter;
IPW_DEBUG_SCAN("Scan completion watchdog resetting "
"adapter after (%dms).\n",
jiffies_to_msecs(IPW_SCAN_CHECK_WATCHDOG));
- queue_work(priv->workqueue, &priv->adapter_restart);
+ schedule_work(&priv->adapter_restart);
} else if (priv->status & STATUS_SCANNING) {
IPW_DEBUG_SCAN("Scan completion watchdog aborting scan "
"after (%dms).\n",
jiffies_to_msecs(IPW_SCAN_CHECK_WATCHDOG));
ipw_abort_scan(priv);
- queue_delayed_work(priv->workqueue, &priv->scan_check, HZ);
+ schedule_delayed_work(&priv->scan_check, HZ);
}
}
if (priv->status & STATUS_ASSOCIATING) {
IPW_DEBUG_ASSOC("Disassociating while associating.\n");
- queue_work(priv->workqueue, &priv->disassociate);
+ schedule_work(&priv->disassociate);
return;
}
priv->quality = quality;
- queue_delayed_work(priv->workqueue, &priv->gather_stats,
- IPW_STATS_INTERVAL);
+ schedule_delayed_work(&priv->gather_stats, IPW_STATS_INTERVAL);
}
static void ipw_bg_gather_stats(struct work_struct *work)
IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
IPW_DL_STATE,
"Aborting scan with missed beacon.\n");
- queue_work(priv->workqueue, &priv->abort_scan);
+ schedule_work(&priv->abort_scan);
}
- queue_work(priv->workqueue, &priv->disassociate);
+ schedule_work(&priv->disassociate);
return;
}
if (!(priv->status & STATUS_ROAMING)) {
priv->status |= STATUS_ROAMING;
if (!(priv->status & STATUS_SCANNING))
- queue_delayed_work(priv->workqueue,
- &priv->request_scan, 0);
+ schedule_delayed_work(&priv->request_scan, 0);
}
return;
}
* channels..) */
IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF | IPW_DL_STATE,
"Aborting scan with missed beacon.\n");
- queue_work(priv->workqueue, &priv->abort_scan);
+ schedule_work(&priv->abort_scan);
}
IPW_DEBUG_NOTIF("Missed beacon: %d\n", missed_count);
/* Only userspace-requested scan completion events go out immediately */
if (!priv->user_requested_scan) {
if (!delayed_work_pending(&priv->scan_event))
- queue_delayed_work(priv->workqueue, &priv->scan_event,
- round_jiffies_relative(msecs_to_jiffies(4000)));
+ schedule_delayed_work(&priv->scan_event,
+ round_jiffies_relative(msecs_to_jiffies(4000)));
} else {
union iwreq_data wrqu;
IPW_DEBUG_ASSOC
("queueing adhoc check\n");
- queue_delayed_work(priv->
- workqueue,
- &priv->
- adhoc_check,
- le16_to_cpu(priv->
- assoc_request.
- beacon_interval));
+ schedule_delayed_work(
+ &priv->adhoc_check,
+ le16_to_cpu(priv->
+ assoc_request.
+ beacon_interval));
break;
}
priv->status &= ~STATUS_ASSOCIATING;
priv->status |= STATUS_ASSOCIATED;
- queue_work(priv->workqueue,
- &priv->system_config);
+ schedule_work(&priv->system_config);
#ifdef CONFIG_IPW2200_QOS
#define IPW_GET_PACKET_STYPE(x) WLAN_FC_GET_STYPE( \
#ifdef CONFIG_IPW2200_MONITOR
if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
priv->status |= STATUS_SCAN_FORCED;
- queue_delayed_work(priv->workqueue,
- &priv->request_scan, 0);
+ schedule_delayed_work(&priv->request_scan, 0);
break;
}
priv->status &= ~STATUS_SCAN_FORCED;
#endif /* CONFIG_IPW2200_MONITOR */
/* Do queued direct scans first */
- if (priv->status & STATUS_DIRECT_SCAN_PENDING) {
- queue_delayed_work(priv->workqueue,
- &priv->request_direct_scan, 0);
- }
+ if (priv->status & STATUS_DIRECT_SCAN_PENDING)
+ schedule_delayed_work(&priv->request_direct_scan, 0);
if (!(priv->status & (STATUS_ASSOCIATED |
STATUS_ASSOCIATING |
STATUS_ROAMING |
STATUS_DISASSOCIATING)))
- queue_work(priv->workqueue, &priv->associate);
+ schedule_work(&priv->associate);
else if (priv->status & STATUS_ROAMING) {
if (x->status == SCAN_COMPLETED_STATUS_COMPLETE)
/* If a scan completed and we are in roam mode, then
* the scan that completed was the one requested as a
* result of entering roam... so, schedule the
* roam work */
- queue_work(priv->workqueue,
- &priv->roam);
+ schedule_work(&priv->roam);
else
/* Don't schedule if we aborted the scan */
priv->status &= ~STATUS_ROAMING;
} else if (priv->status & STATUS_SCAN_PENDING)
- queue_delayed_work(priv->workqueue,
- &priv->request_scan, 0);
+ schedule_delayed_work(&priv->request_scan, 0);
else if (priv->config & CFG_BACKGROUND_SCAN
&& priv->status & STATUS_ASSOCIATED)
- queue_delayed_work(priv->workqueue,
- &priv->request_scan,
- round_jiffies_relative(HZ));
+ schedule_delayed_work(&priv->request_scan,
+ round_jiffies_relative(HZ));
/* Send an empty event to user space.
* We don't send the received data on the event because
/* If the pre-allocated buffer pool is dropping low, schedule to
* refill it */
if (rxq->free_count <= RX_LOW_WATERMARK)
- queue_work(priv->workqueue, &priv->rx_replenish);
+ schedule_work(&priv->rx_replenish);
/* If we've added more space for the firmware to place data, tell it */
if (write != rxq->write)
return;
}
- queue_delayed_work(priv->workqueue, &priv->adhoc_check,
- le16_to_cpu(priv->assoc_request.beacon_interval));
+ schedule_delayed_work(&priv->adhoc_check,
+ le16_to_cpu(priv->assoc_request.beacon_interval));
}
static void ipw_bg_adhoc_check(struct work_struct *work)
} else
priv->status &= ~STATUS_SCAN_PENDING;
- queue_delayed_work(priv->workqueue, &priv->scan_check,
- IPW_SCAN_CHECK_WATCHDOG);
+ schedule_delayed_work(&priv->scan_check, IPW_SCAN_CHECK_WATCHDOG);
done:
mutex_unlock(&priv->mutex);
return err;
!memcmp(network->ssid,
priv->assoc_network->ssid,
network->ssid_len)) {
- queue_work(priv->workqueue,
- &priv->merge_networks);
+ schedule_work(&priv->merge_networks);
}
}
if (priv->status & STATUS_DISASSOCIATING) {
IPW_DEBUG_ASSOC("Not attempting association (in "
"disassociating)\n ");
- queue_work(priv->workqueue, &priv->associate);
+ schedule_work(&priv->associate);
return 0;
}
if (!(priv->status & STATUS_SCANNING)) {
if (!(priv->config & CFG_SPEED_SCAN))
- queue_delayed_work(priv->workqueue,
- &priv->request_scan,
- SCAN_INTERVAL);
+ schedule_delayed_work(&priv->request_scan,
+ SCAN_INTERVAL);
else
- queue_delayed_work(priv->workqueue,
- &priv->request_scan, 0);
+ schedule_delayed_work(&priv->request_scan, 0);
}
return 0;
priv->ieee->iw_mode = wrqu->mode;
- queue_work(priv->workqueue, &priv->adapter_restart);
+ schedule_work(&priv->adapter_restart);
mutex_unlock(&priv->mutex);
return err;
}
IPW_DEBUG_WX("Start scan\n");
- queue_delayed_work(priv->workqueue, work, 0);
+ schedule_delayed_work(work, 0);
return 0;
}
#else
priv->net_dev->type = ARPHRD_IEEE80211;
#endif
- queue_work(priv->workqueue, &priv->adapter_restart);
+ schedule_work(&priv->adapter_restart);
}
ipw_set_channel(priv, parms[1]);
return 0;
}
priv->net_dev->type = ARPHRD_ETHER;
- queue_work(priv->workqueue, &priv->adapter_restart);
+ schedule_work(&priv->adapter_restart);
}
mutex_unlock(&priv->mutex);
return 0;
{
struct ipw_priv *priv = libipw_priv(dev);
IPW_DEBUG_WX("RESET\n");
- queue_work(priv->workqueue, &priv->adapter_restart);
+ schedule_work(&priv->adapter_restart);
return 0;
}
memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN);
printk(KERN_INFO "%s: Setting MAC to %pM\n",
priv->net_dev->name, priv->mac_addr);
- queue_work(priv->workqueue, &priv->adapter_restart);
+ schedule_work(&priv->adapter_restart);
mutex_unlock(&priv->mutex);
return 0;
}
if (rf_kill_active(priv)) {
IPW_DEBUG_RF_KILL("RF Kill active, rescheduling GPIO check\n");
- if (priv->workqueue)
- queue_delayed_work(priv->workqueue,
- &priv->rf_kill, 2 * HZ);
+ schedule_delayed_work(&priv->rf_kill, 2 * HZ);
goto exit_unlock;
}
"device\n");
/* we can not do an adapter restart while inside an irq lock */
- queue_work(priv->workqueue, &priv->adapter_restart);
+ schedule_work(&priv->adapter_restart);
} else
IPW_DEBUG_RF_KILL("HW RF Kill deactivated. SW RF Kill still "
"enabled\n");
notify_wx_assoc_event(priv);
if (priv->config & CFG_BACKGROUND_SCAN)
- queue_delayed_work(priv->workqueue, &priv->request_scan, HZ);
+ schedule_delayed_work(&priv->request_scan, HZ);
}
static void ipw_bg_link_up(struct work_struct *work)
if (!(priv->status & STATUS_EXIT_PENDING)) {
/* Queue up another scan... */
- queue_delayed_work(priv->workqueue, &priv->request_scan, 0);
+ schedule_delayed_work(&priv->request_scan, 0);
} else
cancel_delayed_work(&priv->scan_event);
}
{
int ret = 0;
- priv->workqueue = create_workqueue(DRV_NAME);
init_waitqueue_head(&priv->wait_command_queue);
init_waitqueue_head(&priv->wait_state);
IPW_WARNING("Radio Frequency Kill Switch is On:\n"
"Kill switch must be turned off for "
"wireless networking to work.\n");
- queue_delayed_work(priv->workqueue, &priv->rf_kill,
- 2 * HZ);
+ schedule_delayed_work(&priv->rf_kill, 2 * HZ);
return 0;
}
/* If configure to try and auto-associate, kick
* off a scan. */
- queue_delayed_work(priv->workqueue,
- &priv->request_scan, 0);
+ schedule_delayed_work(&priv->request_scan, 0);
return 0;
}
err = request_irq(pdev->irq, ipw_isr, IRQF_SHARED, DRV_NAME, priv);
if (err) {
IPW_ERROR("Error allocating IRQ %d\n", pdev->irq);
- goto out_destroy_workqueue;
+ goto out_iounmap;
}
SET_NETDEV_DEV(net_dev, &pdev->dev);
sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);
out_release_irq:
free_irq(pdev->irq, priv);
- out_destroy_workqueue:
- destroy_workqueue(priv->workqueue);
- priv->workqueue = NULL;
out_iounmap:
iounmap(priv->hw_base);
out_pci_release_regions:
kfree(priv->cmdlog);
priv->cmdlog = NULL;
}
- /* ipw_down will ensure that there is no more pending work
- * in the workqueue's, so we can safely remove them now. */
- cancel_delayed_work(&priv->adhoc_check);
- cancel_delayed_work(&priv->gather_stats);
- cancel_delayed_work(&priv->request_scan);
- cancel_delayed_work(&priv->request_direct_scan);
- cancel_delayed_work(&priv->request_passive_scan);
- cancel_delayed_work(&priv->scan_event);
- cancel_delayed_work(&priv->rf_kill);
- cancel_delayed_work(&priv->scan_check);
- destroy_workqueue(priv->workqueue);
- priv->workqueue = NULL;
+
+ /* make sure all work items are inactive */
+ cancel_delayed_work_sync(&priv->adhoc_check);
+ cancel_work_sync(&priv->associate);
+ cancel_work_sync(&priv->disassociate);
+ cancel_work_sync(&priv->system_config);
+ cancel_work_sync(&priv->rx_replenish);
+ cancel_work_sync(&priv->adapter_restart);
+ cancel_delayed_work_sync(&priv->rf_kill);
+ cancel_work_sync(&priv->up);
+ cancel_work_sync(&priv->down);
+ cancel_delayed_work_sync(&priv->request_scan);
+ cancel_delayed_work_sync(&priv->request_direct_scan);
+ cancel_delayed_work_sync(&priv->request_passive_scan);
+ cancel_delayed_work_sync(&priv->scan_event);
+ cancel_delayed_work_sync(&priv->gather_stats);
+ cancel_work_sync(&priv->abort_scan);
+ cancel_work_sync(&priv->roam);
+ cancel_delayed_work_sync(&priv->scan_check);
+ cancel_work_sync(&priv->link_up);
+ cancel_work_sync(&priv->link_down);
+ cancel_delayed_work_sync(&priv->led_link_on);
+ cancel_delayed_work_sync(&priv->led_link_off);
+ cancel_delayed_work_sync(&priv->led_act_off);
+ cancel_work_sync(&priv->merge_networks);
/* Free MAC hash list for ADHOC */
for (i = 0; i < IPW_IBSS_MAC_HASH_SIZE; i++) {
priv->suspend_time = get_seconds() - priv->suspend_at;
/* Bring the device back up */
- queue_work(priv->workqueue, &priv->up);
+ schedule_work(&priv->up);
return 0;
}
u8 direct_scan_ssid[IW_ESSID_MAX_SIZE];
u8 direct_scan_ssid_len;
- struct workqueue_struct *workqueue;
-
struct delayed_work adhoc_check;
struct work_struct associate;
struct work_struct disassociate;
snprintf(phba->wq_name, sizeof(phba->wq_name), "beiscsi_q_irq%u",
phba->shost->host_no);
- phba->wq = create_workqueue(phba->wq_name);
+ phba->wq = alloc_workqueue(phba->wq_name, WQ_MEM_RECLAIM, 1);
if (!phba->wq) {
shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
"Failed to allocate work queue\n");
"Can't create request queue\n");
goto fail;
}
- ha->wq = create_workqueue("qla2xxx_wq");
+ ha->wq = alloc_workqueue("qla2xxx_wq", WQ_MEM_RECLAIM, 1);
vha->req = ha->req_q_map[req];
options |= BIT_1;
for (ques = 1; ques < ha->max_rsp_queues; ques++) {
if (!scsi_tgt_cmd_cache)
return -ENOMEM;
- scsi_tgtd = create_workqueue("scsi_tgtd");
+ scsi_tgtd = alloc_workqueue("scsi_tgtd", 0, 1);
if (!scsi_tgtd) {
err = -ENOMEM;
goto free_kmemcache;
kiocb_cachep = KMEM_CACHE(kiocb, SLAB_HWCACHE_ALIGN|SLAB_PANIC);
kioctx_cachep = KMEM_CACHE(kioctx,SLAB_HWCACHE_ALIGN|SLAB_PANIC);
- aio_wq = create_workqueue("aio");
+ aio_wq = alloc_workqueue("aio", 0, 1); /* used to limit concurrency */
abe_pool = mempool_create_kmalloc_pool(1, sizeof(struct aio_batch_entry));
BUG_ON(!aio_wq || !abe_pool);
spin_lock(&fput_lock);
list_add(&req->ki_list, &fput_head);
spin_unlock(&fput_lock);
- queue_work(aio_wq, &fput_work);
+ schedule_work(&fput_work);
} else {
req->ki_filp = NULL;
really_put_req(ctx, req);
percpu_counter_set(&sbi->s_dirtyblocks_counter, 0);
no_journal:
- EXT4_SB(sb)->dio_unwritten_wq = create_workqueue("ext4-dio-unwritten");
+ /*
+ * The maximum number of concurrent work items can be high and
+ * concurrency isn't really necessary. Limit it to 1.
+ */
+ EXT4_SB(sb)->dio_unwritten_wq =
+ alloc_workqueue("ext4-dio-unwritten", WQ_MEM_RECLAIM, 1);
if (!EXT4_SB(sb)->dio_unwritten_wq) {
printk(KERN_ERR "EXT4-fs: failed to create DIO workqueue\n");
goto failed_mount_wq;
{
struct workqueue_struct *wq;
dprintk("RPC: creating workqueue nfsiod\n");
- wq = alloc_workqueue("nfsiod", WQ_RESCUER, 0);
+ wq = alloc_workqueue("nfsiod", WQ_MEM_RECLAIM, 0);
if (wq == NULL)
return -ENOMEM;
nfsiod_workqueue = wq;
extern const struct dquot_operations ocfs2_quota_operations;
extern struct quota_format_type ocfs2_quota_format;
-int ocfs2_quota_setup(void);
-void ocfs2_quota_shutdown(void);
-
#endif /* _OCFS2_QUOTA_H */
* write to gf
*/
-static struct workqueue_struct *ocfs2_quota_wq = NULL;
-
static void qsync_work_fn(struct work_struct *work);
static void ocfs2_global_disk2memdqb(struct dquot *dquot, void *dp)
OCFS2_QBLK_RESERVED_SPACE;
oinfo->dqi_gi.dqi_qtree_depth = qtree_depth(&oinfo->dqi_gi);
INIT_DELAYED_WORK(&oinfo->dqi_sync_work, qsync_work_fn);
- queue_delayed_work(ocfs2_quota_wq, &oinfo->dqi_sync_work,
- msecs_to_jiffies(oinfo->dqi_syncms));
+ schedule_delayed_work(&oinfo->dqi_sync_work,
+ msecs_to_jiffies(oinfo->dqi_syncms));
out_err:
mlog_exit(status);
struct super_block *sb = oinfo->dqi_gqinode->i_sb;
dquot_scan_active(sb, ocfs2_sync_dquot_helper, oinfo->dqi_type);
- queue_delayed_work(ocfs2_quota_wq, &oinfo->dqi_sync_work,
- msecs_to_jiffies(oinfo->dqi_syncms));
+ schedule_delayed_work(&oinfo->dqi_sync_work,
+ msecs_to_jiffies(oinfo->dqi_syncms));
}
/*
.alloc_dquot = ocfs2_alloc_dquot,
.destroy_dquot = ocfs2_destroy_dquot,
};
-
-int ocfs2_quota_setup(void)
-{
- ocfs2_quota_wq = create_workqueue("o2quot");
- if (!ocfs2_quota_wq)
- return -ENOMEM;
- return 0;
-}
-
-void ocfs2_quota_shutdown(void)
-{
- if (ocfs2_quota_wq) {
- flush_workqueue(ocfs2_quota_wq);
- destroy_workqueue(ocfs2_quota_wq);
- ocfs2_quota_wq = NULL;
- }
-}
mlog(ML_ERROR, "Unable to create ocfs2 debugfs root.\n");
}
- status = ocfs2_quota_setup();
- if (status)
- goto leave;
-
ocfs2_set_locking_protocol();
status = register_quota_format(&ocfs2_quota_format);
leave:
if (status < 0) {
- ocfs2_quota_shutdown();
ocfs2_free_mem_caches();
exit_ocfs2_uptodate_cache();
}
{
mlog_entry_void();
- ocfs2_quota_shutdown();
-
if (ocfs2_wq) {
flush_workqueue(ocfs2_wq);
destroy_workqueue(ocfs2_wq);
reiserfs_mounted_fs_count++;
if (reiserfs_mounted_fs_count <= 1) {
reiserfs_write_unlock(sb);
- commit_wq = create_workqueue("reiserfs");
+ commit_wq = alloc_workqueue("reiserfs", WQ_MEM_RECLAIM, 0);
reiserfs_write_lock(sb);
}
if (!xfslogd_workqueue)
goto out_free_buf_zone;
- xfsdatad_workqueue = create_workqueue("xfsdatad");
+ xfsdatad_workqueue = alloc_workqueue("xfsdatad", WQ_MEM_RECLAIM, 1);
if (!xfsdatad_workqueue)
goto out_destroy_xfslogd_workqueue;
- xfsconvertd_workqueue = create_workqueue("xfsconvertd");
+ xfsconvertd_workqueue = alloc_workqueue("xfsconvertd",
+ WQ_MEM_RECLAIM, 1);
if (!xfsconvertd_workqueue)
goto out_destroy_xfsdatad_workqueue;
if (!xfs_mru_elem_zone)
goto out;
- xfs_mru_reap_wq = create_singlethread_workqueue("xfs_mru_cache");
+ xfs_mru_reap_wq = alloc_workqueue("xfs_mru_cache", WQ_MEM_RECLAIM, 1);
if (!xfs_mru_reap_wq)
goto out_destroy_mru_elem_zone;
* any specific CPU, not concurrency managed, and all queued works are
* executed immediately as long as max_active limit is not reached and
* resources are available.
+ *
+ * system_freezeable_wq is equivalent to system_wq except that it's
+ * freezeable.
*/
extern struct workqueue_struct *system_wq;
extern struct workqueue_struct *system_long_wq;
extern struct workqueue_struct *system_nrt_wq;
extern struct workqueue_struct *system_unbound_wq;
+extern struct workqueue_struct *system_freezeable_wq;
extern struct workqueue_struct *
__alloc_workqueue_key(const char *name, unsigned int flags, int max_active,
struct workqueue_struct *system_long_wq __read_mostly;
struct workqueue_struct *system_nrt_wq __read_mostly;
struct workqueue_struct *system_unbound_wq __read_mostly;
+struct workqueue_struct *system_freezeable_wq __read_mostly;
EXPORT_SYMBOL_GPL(system_wq);
EXPORT_SYMBOL_GPL(system_long_wq);
EXPORT_SYMBOL_GPL(system_nrt_wq);
EXPORT_SYMBOL_GPL(system_unbound_wq);
+EXPORT_SYMBOL_GPL(system_freezeable_wq);
#define CREATE_TRACE_POINTS
#include <trace/events/workqueue.h>
system_nrt_wq = alloc_workqueue("events_nrt", WQ_NON_REENTRANT, 0);
system_unbound_wq = alloc_workqueue("events_unbound", WQ_UNBOUND,
WQ_UNBOUND_MAX_ACTIVE);
+ system_freezeable_wq = alloc_workqueue("events_freezeable",
+ WQ_FREEZEABLE, 0);
BUG_ON(!system_wq || !system_long_wq || !system_nrt_wq ||
- !system_unbound_wq);
+ !system_unbound_wq || !system_freezeable_wq);
return 0;
}
early_initcall(init_workqueues);
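/*
 * Illustrative sketch, not part of the patch: a user of the new
 * system_freezeable_wq.  Work that touches devices which may already be
 * frozen during suspend belongs here; the freezer flushes this queue
 * before declaring the system quiescent and keeps it parked until thaw.
 * The media-poll names are hypothetical.
 */
#include <linux/workqueue.h>

static void media_poll_workfn(struct work_struct *work)
{
	/* poll removable media; must not run while the device is frozen */
}
static DECLARE_WORK(media_poll_work, media_poll_workfn);

static void kick_media_poll(void)
{
	queue_work(system_freezeable_wq, &media_poll_work);
}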
unsigned long wsched;
};
+static void p9_poll_workfn(struct work_struct *work);
+
static DEFINE_SPINLOCK(p9_poll_lock);
static LIST_HEAD(p9_poll_pending_list);
-static struct workqueue_struct *p9_mux_wq;
-static struct task_struct *p9_poll_task;
+static DECLARE_WORK(p9_poll_work, p9_poll_workfn);
static void p9_mux_poll_stop(struct p9_conn *m)
{
if (n & POLLIN) {
P9_DPRINTK(P9_DEBUG_TRANS, "sched read work %p\n", m);
- queue_work(p9_mux_wq, &m->rq);
+ schedule_work(&m->rq);
} else
clear_bit(Rworksched, &m->wsched);
} else
if (n & POLLOUT) {
P9_DPRINTK(P9_DEBUG_TRANS, "sched write work %p\n", m);
- queue_work(p9_mux_wq, &m->wq);
+ schedule_work(&m->wq);
} else
clear_bit(Wworksched, &m->wsched);
} else
container_of(wait, struct p9_poll_wait, wait);
struct p9_conn *m = pwait->conn;
unsigned long flags;
- DECLARE_WAITQUEUE(dummy_wait, p9_poll_task);
spin_lock_irqsave(&p9_poll_lock, flags);
if (list_empty(&m->poll_pending_link))
list_add_tail(&m->poll_pending_link, &p9_poll_pending_list);
spin_unlock_irqrestore(&p9_poll_lock, flags);
- /* perform the default wake up operation */
- return default_wake_function(&dummy_wait, mode, sync, key);
+ schedule_work(&p9_poll_work);
+ return 1;
}
/**
P9_DPRINTK(P9_DEBUG_TRANS, "mux %p can read\n", m);
if (!test_and_set_bit(Rworksched, &m->wsched)) {
P9_DPRINTK(P9_DEBUG_TRANS, "sched read work %p\n", m);
- queue_work(p9_mux_wq, &m->rq);
+ schedule_work(&m->rq);
}
}
if ((m->wsize || !list_empty(&m->unsent_req_list)) &&
!test_and_set_bit(Wworksched, &m->wsched)) {
P9_DPRINTK(P9_DEBUG_TRANS, "sched write work %p\n", m);
- queue_work(p9_mux_wq, &m->wq);
+ schedule_work(&m->wq);
}
}
}
n = p9_fd_poll(m->client, NULL);
if (n & POLLOUT && !test_and_set_bit(Wworksched, &m->wsched))
- queue_work(p9_mux_wq, &m->wq);
+ schedule_work(&m->wq);
return 0;
}
*
*/
-static int p9_poll_proc(void *a)
+static void p9_poll_workfn(struct work_struct *work)
{
unsigned long flags;
P9_DPRINTK(P9_DEBUG_TRANS, "start %p\n", current);
- repeat:
+
spin_lock_irqsave(&p9_poll_lock, flags);
while (!list_empty(&p9_poll_pending_list)) {
struct p9_conn *conn = list_first_entry(&p9_poll_pending_list,
}
spin_unlock_irqrestore(&p9_poll_lock, flags);
- set_current_state(TASK_INTERRUPTIBLE);
- if (list_empty(&p9_poll_pending_list)) {
- P9_DPRINTK(P9_DEBUG_TRANS, "sleeping...\n");
- schedule();
- }
- __set_current_state(TASK_RUNNING);
-
- if (!kthread_should_stop())
- goto repeat;
-
P9_DPRINTK(P9_DEBUG_TRANS, "finish\n");
- return 0;
}
int p9_trans_fd_init(void)
{
- p9_mux_wq = create_workqueue("v9fs");
- if (!p9_mux_wq) {
- printk(KERN_WARNING "v9fs: mux: creating workqueue failed\n");
- return -ENOMEM;
- }
-
- p9_poll_task = kthread_run(p9_poll_proc, NULL, "v9fs-poll");
- if (IS_ERR(p9_poll_task)) {
- destroy_workqueue(p9_mux_wq);
- printk(KERN_WARNING "v9fs: mux: creating poll task failed\n");
- return PTR_ERR(p9_poll_task);
- }
-
v9fs_register_trans(&p9_tcp_trans);
v9fs_register_trans(&p9_unix_trans);
v9fs_register_trans(&p9_fd_trans);
void p9_trans_fd_exit(void)
{
- kthread_stop(p9_poll_task);
+ flush_work_sync(&p9_poll_work);
v9fs_unregister_trans(&p9_tcp_trans);
v9fs_unregister_trans(&p9_unix_trans);
v9fs_unregister_trans(&p9_fd_trans);
-
- destroy_workqueue(p9_mux_wq);
}
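/*
 * Illustrative sketch, not part of the patch: the kthread-to-work-item
 * conversion applied to the 9p poller above.  Rather than waking a
 * dedicated task from the wait-queue callback, declare the handler as a
 * work item and schedule it from the callback; the system workqueue
 * provides the execution context and flush_work_sync() replaces
 * kthread_stop() on exit.  The example_* names are hypothetical.
 */
#include <linux/wait.h>
#include <linux/workqueue.h>

static void example_poll_workfn(struct work_struct *work)
{
	/* drain the pending-connection list, as p9_poll_workfn() does */
}
static DECLARE_WORK(example_poll_work, example_poll_workfn);

static int example_pollwake(wait_queue_t *wait, unsigned mode, int sync,
			    void *key)
{
	/* queue the connection on the pending list, then: */
	schedule_work(&example_poll_work);
	return 1;
}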
rds_ib_sysctl_exit();
rds_ib_recv_exit();
rds_trans_unregister(&rds_ib_transport);
- rds_ib_fmr_exit();
}
struct rds_transport rds_ib_transport = {
INIT_LIST_HEAD(&rds_ib_devices);
- ret = rds_ib_fmr_init();
- if (ret)
- goto out;
-
ret = ib_register_client(&rds_ib_client);
if (ret)
- goto out_fmr_exit;
+ goto out;
ret = rds_ib_sysctl_init();
if (ret)
rds_ib_sysctl_exit();
out_ibreg:
rds_ib_unregister_client();
-out_fmr_exit:
- rds_ib_fmr_exit();
out:
return ret;
}
void rds_ib_sync_mr(void *trans_private, int dir);
void rds_ib_free_mr(void *trans_private, int invalidate);
void rds_ib_flush_mrs(void);
-int rds_ib_fmr_init(void);
-void rds_ib_fmr_exit(void);
/* ib_recv.c */
int rds_ib_recv_init(void);
#include "ib.h"
#include "xlist.h"
-static struct workqueue_struct *rds_ib_fmr_wq;
-
static DEFINE_PER_CPU(unsigned long, clean_list_grace);
#define CLEAN_LIST_BUSY_BIT 0
int err = 0, iter = 0;
if (atomic_read(&pool->dirty_count) >= pool->max_items / 10)
- queue_delayed_work(rds_ib_fmr_wq, &pool->flush_worker, 10);
+ schedule_delayed_work(&pool->flush_worker, 10);
while (1) {
ibmr = rds_ib_reuse_fmr(pool);
return ret;
}
-int rds_ib_fmr_init(void)
-{
- rds_ib_fmr_wq = create_workqueue("rds_fmr_flushd");
- if (!rds_ib_fmr_wq)
- return -ENOMEM;
- return 0;
-}
-
-/*
- * By the time this is called all the IB devices should have been torn down and
- * had their pools freed. As each pool is freed its work struct is waited on,
- * so the pool flushing work queue should be idle by the time we get here.
- */
-void rds_ib_fmr_exit(void)
-{
- destroy_workqueue(rds_ib_fmr_wq);
-}
-
static void rds_ib_mr_pool_flush_worker(struct work_struct *work)
{
struct rds_ib_mr_pool *pool = container_of(work, struct rds_ib_mr_pool, flush_worker.work);
/* If we've pinned too many pages, request a flush */
if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned ||
atomic_read(&pool->dirty_count) >= pool->max_items / 10)
- queue_delayed_work(rds_ib_fmr_wq, &pool->flush_worker, 10);
+ schedule_delayed_work(&pool->flush_worker, 10);
if (invalidate) {
if (likely(!in_interrupt())) {
} else {
/* We get here if the user created a MR marked
* as use_once and invalidate at the same time. */
- queue_delayed_work(rds_ib_fmr_wq,
- &pool->flush_worker, 10);
+ schedule_delayed_work(&pool->flush_worker, 10);
}
}
* Create the rpciod thread and wait for it to start.
*/
dprintk("RPC: creating workqueue rpciod\n");
- wq = alloc_workqueue("rpciod", WQ_RESCUER, 0);
+ wq = alloc_workqueue("rpciod", WQ_MEM_RECLAIM, 0);
rpciod_workqueue = wq;
return rpciod_workqueue != NULL;
}