@@ -1029,19 +1029,27 @@ void mlx5_mkey_cache_cleanup(struct mlx5_ib_dev *dev)
 	if (!dev->cache.wq)
 		return;
 
-	cancel_delayed_work_sync(&dev->cache.remove_ent_dwork);
 	mutex_lock(&dev->cache.rb_lock);
+	dev->cache.disable = true;
 	for (node = rb_first(root); node; node = rb_next(node)) {
 		ent = rb_entry(node, struct mlx5_cache_ent, node);
 		xa_lock_irq(&ent->mkeys);
 		ent->disabled = true;
 		xa_unlock_irq(&ent->mkeys);
-		cancel_delayed_work_sync(&ent->dwork);
 	}
+	mutex_unlock(&dev->cache.rb_lock);
+
+	/*
+	 * After all entries are disabled and will not reschedule on WQ,
+	 * flush it and all async commands.
+	 */
+	flush_workqueue(dev->cache.wq);
 
 	mlx5_mkey_cache_debugfs_cleanup(dev);
 	mlx5_cmd_cleanup_async_ctx(&dev->async_ctx);
 
+	/* At this point all entries are disabled and have no concurrent work. */
+	mutex_lock(&dev->cache.rb_lock);
 	node = rb_first(root);
 	while (node) {
 		ent = rb_entry(node, struct mlx5_cache_ent, node);
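
The hunk above stops calling cancel_delayed_work_sync() on each entry while rb_lock is held; waiting synchronously for work under rb_lock can deadlock if the work handler itself takes rb_lock. Instead, cleanup marks the cache and every entry disabled under the lock, drops the lock, and waits once with flush_workqueue(). A minimal sketch of that shutdown ordering, with hypothetical names (my_cache, my_ent) standing in for the mlx5 structures:

#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/types.h>
#include <linux/workqueue.h>

struct my_cache {
	struct mutex rb_lock;		/* protects the rb-tree of entries */
	struct rb_root rb_root;
	struct workqueue_struct *wq;	/* runs per-entry delayed work */
	bool disable;			/* set once; blocks new stores */
};

struct my_ent {
	struct rb_node node;
	bool disabled;			/* checked by the work handler */
	struct delayed_work dwork;
};

static void my_cache_cleanup(struct my_cache *cache)
{
	struct rb_node *node;
	struct my_ent *ent;

	/*
	 * Mark everything disabled under rb_lock so work handlers stop
	 * rescheduling themselves, but do NOT wait for work here: a
	 * handler that takes rb_lock would deadlock against us.
	 */
	mutex_lock(&cache->rb_lock);
	cache->disable = true;
	for (node = rb_first(&cache->rb_root); node; node = rb_next(node)) {
		ent = rb_entry(node, struct my_ent, node);
		ent->disabled = true;
	}
	mutex_unlock(&cache->rb_lock);

	/* Now wait for all already-queued work with no locks held. */
	flush_workqueue(cache->wq);
}

The real code also takes each entry's own xa_lock when setting ent->disabled so that handlers already running observe the flag; the sketch keeps only rb_lock for brevity.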
@@ -1827,6 +1835,10 @@ static int cache_ent_find_and_store(struct mlx5_ib_dev *dev,
 	}
 
 	mutex_lock(&cache->rb_lock);
+	if (cache->disable) {
+		mutex_unlock(&cache->rb_lock);
+		return 0;
+	}
 	ent = mkey_cache_ent_from_rb_key(dev, mr->mmkey.rb_key);
 	if (ent) {
 		if (ent->rb_key.ndescs == mr->mmkey.rb_key.ndescs) {
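
The second hunk closes the race on the insert side: once cleanup has set cache->disable under rb_lock, cache_ent_find_and_store() bails out early, so no new entry (and thus no new queued work) can appear behind the teardown. The same guard in terms of the hypothetical names above:

/*
 * Store an entry unless teardown has begun. Returning 0 on the
 * disabled path treats the skipped store as a benign no-op.
 */
static int my_cache_store(struct my_cache *cache, struct my_ent *ent)
{
	mutex_lock(&cache->rb_lock);
	if (cache->disable) {
		mutex_unlock(&cache->rb_lock);
		return 0;
	}
	/* ... normal rb-tree insert, still under rb_lock ... */
	mutex_unlock(&cache->rb_lock);
	return 0;
}

Both the disable check and the insert happen under rb_lock, so a store either completes before cleanup walks the tree or observes the flag and skips; there is no window in between.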