
Commit 1e458c2

Dave Marquardt authored and gregkh committed
net: ibmveth: make veth_pool_store stop hanging
[ Upstream commit 053f3ff ]

v2:
- Created a single error handling unlock and exit in veth_pool_store
- Greatly expanded commit message with previous explanatory-only text

Summary: Use rtnl_mutex to synchronize veth_pool_store with itself,
ibmveth_close and ibmveth_open, preventing multiple calls in a row to
napi_disable.

Background: Two (or more) threads could call veth_pool_store through
writing to /sys/devices/vio/30000002/pool*/*. You can do this easily
with a little shell script. This causes a hang.

I configured LOCKDEP, compiled ibmveth.c with DEBUG, and built a new
kernel. I ran this test again and saw:

Setting pool0/active to 0
Setting pool1/active to 1
[ 73.911067][ T4365] ibmveth 30000002 eth0: close starting
Setting pool1/active to 1
Setting pool1/active to 0
[ 73.911367][ T4366] ibmveth 30000002 eth0: close starting
[ 73.916056][ T4365] ibmveth 30000002 eth0: close complete
[ 73.916064][ T4365] ibmveth 30000002 eth0: open starting
[ 110.808564][ T712] systemd-journald[712]: Sent WATCHDOG=1 notification.
[ 230.808495][ T712] systemd-journald[712]: Sent WATCHDOG=1 notification.
[ 243.683786][ T123] INFO: task stress.sh:4365 blocked for more than 122 seconds.
[ 243.683827][ T123] Not tainted 6.14.0-01103-g2df0c02dab82-dirty #8
[ 243.683833][ T123] "echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message.
[ 243.683838][ T123] task:stress.sh state:D stack:28096 pid:4365 tgid:4365 ppid:4364 task_flags:0x400040 flags:0x00042000
[ 243.683852][ T123] Call Trace:
[ 243.683857][ T123] [c00000000c38f690] [0000000000000001] 0x1 (unreliable)
[ 243.683868][ T123] [c00000000c38f840] [c00000000001f908] __switch_to+0x318/0x4e0
[ 243.683878][ T123] [c00000000c38f8a0] [c000000001549a70] __schedule+0x500/0x12a0
[ 243.683888][ T123] [c00000000c38f9a0] [c00000000154a878] schedule+0x68/0x210
[ 243.683896][ T123] [c00000000c38f9d0] [c00000000154ac80] schedule_preempt_disabled+0x30/0x50
[ 243.683904][ T123] [c00000000c38fa00] [c00000000154dbb0] __mutex_lock+0x730/0x10f0
[ 243.683913][ T123] [c00000000c38fb10] [c000000001154d40] napi_enable+0x30/0x60
[ 243.683921][ T123] [c00000000c38fb40] [c000000000f4ae94] ibmveth_open+0x68/0x5dc
[ 243.683928][ T123] [c00000000c38fbe0] [c000000000f4aa20] veth_pool_store+0x220/0x270
[ 243.683936][ T123] [c00000000c38fc70] [c000000000826278] sysfs_kf_write+0x68/0xb0
[ 243.683944][ T123] [c00000000c38fcb0] [c0000000008240b8] kernfs_fop_write_iter+0x198/0x2d0
[ 243.683951][ T123] [c00000000c38fd00] [c00000000071b9ac] vfs_write+0x34c/0x650
[ 243.683958][ T123] [c00000000c38fdc0] [c00000000071bea8] ksys_write+0x88/0x150
[ 243.683966][ T123] [c00000000c38fe10] [c0000000000317f4] system_call_exception+0x124/0x340
[ 243.683973][ T123] [c00000000c38fe50] [c00000000000d05c] system_call_vectored_common+0x15c/0x2ec
...
[ 243.684087][ T123] Showing all locks held in the system:
[ 243.684095][ T123] 1 lock held by khungtaskd/123:
[ 243.684099][ T123] #0: c00000000278e370 (rcu_read_lock){....}-{1:2}, at: debug_show_all_locks+0x50/0x248
[ 243.684114][ T123] 4 locks held by stress.sh/4365:
[ 243.684119][ T123] #0: c00000003a4cd3f8 (sb_writers#3){.+.+}-{0:0}, at: ksys_write+0x88/0x150
[ 243.684132][ T123] #1: c000000041aea888 (&of->mutex#2){+.+.}-{3:3}, at: kernfs_fop_write_iter+0x154/0x2d0
[ 243.684143][ T123] #2: c0000000366fb9a8 (kn->active#64){.+.+}-{0:0}, at: kernfs_fop_write_iter+0x160/0x2d0
[ 243.684155][ T123] #3: c000000035ff4cb8 (&dev->lock){+.+.}-{3:3}, at: napi_enable+0x30/0x60
[ 243.684166][ T123] 5 locks held by stress.sh/4366:
[ 243.684170][ T123] #0: c00000003a4cd3f8 (sb_writers#3){.+.+}-{0:0}, at: ksys_write+0x88/0x150
[ 243.684183][ T123] #1: c00000000aee2288 (&of->mutex#2){+.+.}-{3:3}, at: kernfs_fop_write_iter+0x154/0x2d0
[ 243.684194][ T123] #2: c0000000366f4ba8 (kn->active#64){.+.+}-{0:0}, at: kernfs_fop_write_iter+0x160/0x2d0
[ 243.684205][ T123] #3: c000000035ff4cb8 (&dev->lock){+.+.}-{3:3}, at: napi_disable+0x30/0x60
[ 243.684216][ T123] #4: c0000003ff9bbf18 (&rq->__lock){-.-.}-{2:2}, at: __schedule+0x138/0x12a0

From the ibmveth debug, two threads are calling veth_pool_store, which
calls ibmveth_close and ibmveth_open. Here's the sequence:

  T4365             T4366
  ----------------- ----------------- ---------
  veth_pool_store   veth_pool_store
  ibmveth_close
                    ibmveth_close
  napi_disable
                    napi_disable
  ibmveth_open
  napi_enable <- HANG

ibmveth_close calls napi_disable at the top and ibmveth_open calls
napi_enable at the top. https://docs.kernel.org/networking/napi.html
says:

    The control APIs are not idempotent. Control API calls are safe
    against concurrent use of datapath APIs but an incorrect sequence
    of control API calls may result in crashes, deadlocks, or race
    conditions. For example, calling napi_disable() multiple times in
    a row will deadlock.

In the normal open and close paths, rtnl_mutex is acquired to prevent
other callers. This is missing from veth_pool_store. Using rtnl_mutex in
veth_pool_store fixes these hangs.

Signed-off-by: Dave Marquardt <davemarq@linux.ibm.com>
Fixes: 860f242 ("[PATCH] ibmveth change buffer pools dynamically")
Reviewed-by: Nick Child <nnac123@linux.ibm.com>
Reviewed-by: Simon Horman <horms@kernel.org>
Link: https://patch.msgid.link/20250402154403.386744-1-davemarq@linux.ibm.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
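
Editor's note: the commit message above reproduces the hang by writing to the
pool sysfs attributes from more than one thread at once ("a little shell
script", not included in the patch). As an illustration only, a minimal C
reproducer along the same lines might look like the sketch below. The
30000002 device path and the pool*/active attribute names are taken from the
log above; the thread count, iteration count, and helper names are
assumptions. Build with something like "cc -O2 -pthread repro.c -o repro"
and run against a test system only, since it is expected to hang the device
on unfixed kernels.

/* Hypothetical reproducer sketch: two threads toggling ibmveth pool
 * sysfs attributes concurrently. Each write() lands in veth_pool_store,
 * so two writers race the close/open path described above.
 */
#include <fcntl.h>
#include <pthread.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static void write_attr(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror(path);
		return;
	}
	if (write(fd, val, strlen(val)) < 0)
		perror(path);
	close(fd);
}

static void *toggle(void *arg)
{
	const char *path = arg;

	for (int i = 0; i < 1000; i++) {
		write_attr(path, "0");
		write_attr(path, "1");
	}
	return NULL;
}

int main(void)
{
	pthread_t t1, t2;

	/* Device path is an assumption; adjust for the system under test. */
	pthread_create(&t1, NULL, toggle, "/sys/devices/vio/30000002/pool0/active");
	pthread_create(&t2, NULL, toggle, "/sys/devices/vio/30000002/pool1/active");
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);
	return 0;
}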
1 parent ececf8e commit 1e458c2

File tree

1 file changed: drivers/net/ethernet/ibm/ibmveth.c (+27 -12)

@@ -1824,18 +1824,22 @@ static ssize_t veth_pool_store(struct kobject *kobj, struct attribute *attr,
 	long value = simple_strtol(buf, NULL, 10);
 	long rc;
 
+	rtnl_lock();
+
 	if (attr == &veth_active_attr) {
 		if (value && !pool->active) {
 			if (netif_running(netdev)) {
 				if (ibmveth_alloc_buffer_pool(pool)) {
 					netdev_err(netdev,
 						   "unable to alloc pool\n");
-					return -ENOMEM;
+					rc = -ENOMEM;
+					goto unlock_err;
 				}
 				pool->active = 1;
 				ibmveth_close(netdev);
-				if ((rc = ibmveth_open(netdev)))
-					return rc;
+				rc = ibmveth_open(netdev);
+				if (rc)
+					goto unlock_err;
 			} else {
 				pool->active = 1;
 			}
@@ -1855,48 +1859,59 @@ static ssize_t veth_pool_store(struct kobject *kobj, struct attribute *attr,
 
 			if (i == IBMVETH_NUM_BUFF_POOLS) {
 				netdev_err(netdev, "no active pool >= MTU\n");
-				return -EPERM;
+				rc = -EPERM;
+				goto unlock_err;
 			}
 
 			if (netif_running(netdev)) {
 				ibmveth_close(netdev);
 				pool->active = 0;
-				if ((rc = ibmveth_open(netdev)))
-					return rc;
+				rc = ibmveth_open(netdev);
+				if (rc)
+					goto unlock_err;
 			}
 			pool->active = 0;
 		}
 	} else if (attr == &veth_num_attr) {
 		if (value <= 0 || value > IBMVETH_MAX_POOL_COUNT) {
-			return -EINVAL;
+			rc = -EINVAL;
+			goto unlock_err;
 		} else {
 			if (netif_running(netdev)) {
 				ibmveth_close(netdev);
 				pool->size = value;
-				if ((rc = ibmveth_open(netdev)))
-					return rc;
+				rc = ibmveth_open(netdev);
+				if (rc)
+					goto unlock_err;
 			} else {
 				pool->size = value;
 			}
 		}
 	} else if (attr == &veth_size_attr) {
 		if (value <= IBMVETH_BUFF_OH || value > IBMVETH_MAX_BUF_SIZE) {
-			return -EINVAL;
+			rc = -EINVAL;
+			goto unlock_err;
 		} else {
 			if (netif_running(netdev)) {
 				ibmveth_close(netdev);
 				pool->buff_size = value;
-				if ((rc = ibmveth_open(netdev)))
-					return rc;
+				rc = ibmveth_open(netdev);
+				if (rc)
+					goto unlock_err;
 			} else {
 				pool->buff_size = value;
 			}
 		}
 	}
+	rtnl_unlock();
 
 	/* kick the interrupt handler to allocate/deallocate pools */
 	ibmveth_interrupt(netdev->irq, netdev);
 	return count;
+
+unlock_err:
+	rtnl_unlock();
+	return rc;
 }
 
 
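Editor's note: the v2 note at the top of the commit message mentions "a
single error handling unlock and exit". As a reading aid only, the condensed
C sketch below (not the verbatim driver code) shows the shape the diff gives
veth_pool_store; reconfigure_pool() is a hypothetical stand-in for the
attribute handling, which the real function keeps inline.

/* Condensed sketch of the locking pattern introduced above. */
static ssize_t veth_pool_store(struct kobject *kobj, struct attribute *attr,
			       const char *buf, size_t count)
{
	long rc;

	rtnl_lock();	/* serialize with ibmveth_open()/ibmveth_close() */

	rc = reconfigure_pool(kobj, attr, buf);	/* hypothetical helper */
	if (rc)
		goto unlock_err;	/* single error exit */

	rtnl_unlock();
	return count;

unlock_err:
	rtnl_unlock();
	return rc;
}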