NVMe: Start-stop nvme_thread during device add-remove.
This is done to ensure nvme_thread is not running when there are no devices to poll.

Signed-off-by: Dan McLeran <daniel.mcleran@intel.com>
Signed-off-by: Matthew Wilcox <matthew.r.wilcox@intel.com>
commit b9afca3efb
parent b355084a89
committed by Matthew Wilcox
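The change introduces a simple lifecycle rule: the nvme_thread poller is created when the first controller is added to dev_list and stopped when the last one is removed, instead of living for the whole lifetime of the module. Below is a minimal userspace sketch of that start-on-first-add / stop-on-last-remove pattern, assuming a pthreads analogue rather than the kernel API; device_add, device_remove and poller_fn are invented names for illustration, with pthread_join standing in for kthread_stop().

/*
 * Illustrative sketch only (assumed userspace analogue, not the NVMe driver):
 * one poller thread is created when the first device is added and joined
 * when the last device is removed, mirroring the nvme_thread lifecycle.
 * Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdbool.h>
#include <unistd.h>

static pthread_mutex_t dev_lock = PTHREAD_MUTEX_INITIALIZER;
static int dev_count;            /* stands in for dev_list */
static bool poller_running;      /* stands in for nvme_thread being non-NULL */
static pthread_t poller;

static void *poller_fn(void *arg)
{
        (void)arg;
        for (;;) {
                pthread_mutex_lock(&dev_lock);
                bool keep_going = poller_running;
                pthread_mutex_unlock(&dev_lock);
                if (!keep_going)
                        break;
                /* poll every registered device here */
                usleep(100 * 1000);
        }
        return NULL;
}

/* Device probe path: start the poller for the first device only. */
static int device_add(void)
{
        pthread_mutex_lock(&dev_lock);
        bool start = (dev_count++ == 0);
        if (start)
                poller_running = true;
        pthread_mutex_unlock(&dev_lock);

        if (start)
                return pthread_create(&poller, NULL, poller_fn, NULL);
        return 0;
}

/* Device removal path: stop the poller after the last device goes away. */
static void device_remove(void)
{
        pthread_mutex_lock(&dev_lock);
        bool stop = (--dev_count == 0);
        if (stop)
                poller_running = false;
        pthread_mutex_unlock(&dev_lock);

        if (stop)
                pthread_join(poller, NULL);   /* analogous to kthread_stop() */
}

int main(void)
{
        device_add();       /* first device starts the poller */
        device_add();       /* second device reuses the running poller */
        device_remove();
        device_remove();    /* last removal stops and joins the poller */
        return 0;
}

The sketch deliberately skips the race the real patch handles with nvme_kthread_wait: when two controllers probe concurrently, the one that does not create the thread must wait_event_killable() until nvme_thread has actually been assigned, and then check IS_ERR_OR_NULL() for a failed creation, before it can rely on the poller.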
@@ -64,6 +64,7 @@ static DEFINE_SPINLOCK(dev_list_lock);
 static LIST_HEAD(dev_list);
 static struct task_struct *nvme_thread;
 static struct workqueue_struct *nvme_workq;
+static wait_queue_head_t nvme_kthread_wait;
 
 static void nvme_reset_failed_dev(struct work_struct *ws);
 
@@ -2374,6 +2375,26 @@ static void nvme_disable_io_queues(struct nvme_dev *dev)
         kthread_stop(kworker_task);
 }
 
+/*
+* Remove the node from the device list and check
+* for whether or not we need to stop the nvme_thread.
+*/
+static void nvme_dev_list_remove(struct nvme_dev *dev)
+{
+        struct task_struct *tmp = NULL;
+
+        spin_lock(&dev_list_lock);
+        list_del_init(&dev->node);
+        if (list_empty(&dev_list) && !IS_ERR_OR_NULL(nvme_thread)) {
+                tmp = nvme_thread;
+                nvme_thread = NULL;
+        }
+        spin_unlock(&dev_list_lock);
+
+        if (tmp)
+                kthread_stop(tmp);
+}
+
 static void nvme_dev_shutdown(struct nvme_dev *dev)
 {
         int i;
@@ -2381,9 +2402,7 @@ static void nvme_dev_shutdown(struct nvme_dev *dev)
         dev->initialized = 0;
         unregister_hotcpu_notifier(&dev->nb);
 
-        spin_lock(&dev_list_lock);
-        list_del_init(&dev->node);
-        spin_unlock(&dev_list_lock);
+        nvme_dev_list_remove(dev);
 
         if (!dev->bar || (dev->bar && readl(&dev->bar->csts) == -1)) {
                 for (i = dev->queue_count - 1; i >= 0; i--) {
@@ -2524,6 +2543,7 @@ static const struct file_operations nvme_dev_fops = {
 static int nvme_dev_start(struct nvme_dev *dev)
 {
         int result;
+        bool start_thread = false;
 
         result = nvme_dev_map(dev);
         if (result)
@@ -2534,9 +2554,24 @@ static int nvme_dev_start(struct nvme_dev *dev)
                 goto unmap;
 
         spin_lock(&dev_list_lock);
+        if (list_empty(&dev_list) && IS_ERR_OR_NULL(nvme_thread)) {
+                start_thread = true;
+                nvme_thread = NULL;
+        }
         list_add(&dev->node, &dev_list);
         spin_unlock(&dev_list_lock);
 
+        if (start_thread) {
+                nvme_thread = kthread_run(nvme_kthread, NULL, "nvme");
+                wake_up(&nvme_kthread_wait);
+        } else
+                wait_event_killable(nvme_kthread_wait, nvme_thread);
+
+        if (IS_ERR_OR_NULL(nvme_thread)) {
+                result = nvme_thread ? PTR_ERR(nvme_thread) : -EINTR;
+                goto disable;
+        }
+
         result = nvme_setup_io_queues(dev);
         if (result && result != -EBUSY)
                 goto disable;
@@ -2545,9 +2580,7 @@ static int nvme_dev_start(struct nvme_dev *dev)
 
 disable:
         nvme_disable_queue(dev, 0);
-        spin_lock(&dev_list_lock);
-        list_del_init(&dev->node);
-        spin_unlock(&dev_list_lock);
+        nvme_dev_list_remove(dev);
 unmap:
         nvme_dev_unmap(dev);
         return result;
@@ -2776,14 +2809,11 @@ static int __init nvme_init(void)
 {
         int result;
 
-        nvme_thread = kthread_run(nvme_kthread, NULL, "nvme");
-        if (IS_ERR(nvme_thread))
-                return PTR_ERR(nvme_thread);
+        init_waitqueue_head(&nvme_kthread_wait);
 
-        result = -ENOMEM;
         nvme_workq = create_singlethread_workqueue("nvme");
         if (!nvme_workq)
-                goto kill_kthread;
+                return -ENOMEM;
 
         result = register_blkdev(nvme_major, "nvme");
         if (result < 0)
@@ -2800,8 +2830,6 @@ static int __init nvme_init(void)
         unregister_blkdev(nvme_major, "nvme");
 kill_workq:
         destroy_workqueue(nvme_workq);
-kill_kthread:
-        kthread_stop(nvme_thread);
         return result;
 }
 
@@ -2810,7 +2838,7 @@ static void __exit nvme_exit(void)
         pci_unregister_driver(&nvme_driver);
         unregister_blkdev(nvme_major, "nvme");
         destroy_workqueue(nvme_workq);
-        kthread_stop(nvme_thread);
+        BUG_ON(nvme_thread && !IS_ERR(nvme_thread));
 }
 
 MODULE_AUTHOR("Matthew Wilcox <willy@linux.intel.com>");