@@ -44,108 +44,132 @@ bool mlx5_sriov_is_enabled(struct mlx5_core_dev *dev)
4444 return !!sriov -> num_vfs ;
4545}
4646
47- static void enable_vfs (struct mlx5_core_dev * dev , int num_vfs )
47+ static int mlx5_device_enable_sriov (struct mlx5_core_dev * dev , int num_vfs )
4848{
4949 struct mlx5_core_sriov * sriov = & dev -> priv .sriov ;
5050 int err ;
5151 int vf ;
5252
53- for (vf = 1 ; vf <= num_vfs ; vf ++ ) {
54- err = mlx5_core_enable_hca (dev , vf );
53+ if (sriov -> enabled_vfs ) {
54+ mlx5_core_warn (dev ,
55+ "failed to enable SRIOV on device, already enabled with %d vfs\n" ,
56+ sriov -> enabled_vfs );
57+ return - EBUSY ;
58+ }
59+
60+ #ifdef CONFIG_MLX5_CORE_EN
61+ err = mlx5_eswitch_enable_sriov (dev -> priv .eswitch , num_vfs , SRIOV_LEGACY );
62+ if (err ) {
63+ mlx5_core_warn (dev ,
64+ "failed to enable eswitch SRIOV (%d)\n" , err );
65+ return err ;
66+ }
67+ #endif
68+
69+ for (vf = 0 ; vf < num_vfs ; vf ++ ) {
70+ err = mlx5_core_enable_hca (dev , vf + 1 );
5571 if (err ) {
56- mlx5_core_warn (dev , "failed to enable VF %d\n" , vf - 1 );
57- } else {
58- sriov -> vfs_ctx [vf - 1 ].enabled = 1 ;
59- mlx5_core_dbg (dev , "successfully enabled VF %d\n" , vf - 1 );
72+ mlx5_core_warn (dev , "failed to enable VF %d (%d)\n" , vf , err );
73+ continue ;
6074 }
75+ sriov -> vfs_ctx [vf ].enabled = 1 ;
76+ sriov -> enabled_vfs ++ ;
77+ mlx5_core_dbg (dev , "successfully enabled VF* %d\n" , vf );
78+
6179 }
80+
81+ return 0 ;
6282}
6383
64- static void disable_vfs (struct mlx5_core_dev * dev , int num_vfs )
84+ static void mlx5_device_disable_sriov (struct mlx5_core_dev * dev )
6585{
6686 struct mlx5_core_sriov * sriov = & dev -> priv .sriov ;
87+ int err ;
6788 int vf ;
6889
69- for (vf = 1 ; vf <= num_vfs ; vf ++ ) {
70- if (sriov -> vfs_ctx [vf - 1 ].enabled ) {
71- if (mlx5_core_disable_hca (dev , vf ))
72- mlx5_core_warn (dev , "failed to disable VF %d\n" , vf - 1 );
73- else
74- sriov -> vfs_ctx [vf - 1 ].enabled = 0 ;
90+ if (!sriov -> enabled_vfs )
91+ return ;
92+
93+ for (vf = 0 ; vf < sriov -> num_vfs ; vf ++ ) {
94+ if (!sriov -> vfs_ctx [vf ].enabled )
95+ continue ;
96+ err = mlx5_core_disable_hca (dev , vf + 1 );
97+ if (err ) {
98+ mlx5_core_warn (dev , "failed to disable VF %d\n" , vf );
99+ continue ;
75100 }
101+ sriov -> vfs_ctx [vf ].enabled = 0 ;
102+ sriov -> enabled_vfs -- ;
76103 }
104+
105+ #ifdef CONFIG_MLX5_CORE_EN
106+ mlx5_eswitch_disable_sriov (dev -> priv .eswitch );
107+ #endif
108+
109+ if (mlx5_wait_for_vf_pages (dev ))
110+ mlx5_core_warn (dev , "timeout reclaiming VFs pages\n" );
77111}
78112
79- static int mlx5_core_create_vfs (struct pci_dev * pdev , int num_vfs )
113+ static int mlx5_pci_enable_sriov (struct pci_dev * pdev , int num_vfs )
80114{
81115 struct mlx5_core_dev * dev = pci_get_drvdata (pdev );
82- int err ;
83-
84- if (pci_num_vf (pdev ))
85- pci_disable_sriov (pdev );
86-
87- enable_vfs (dev , num_vfs );
116+ int err = 0 ;
88117
89- err = pci_enable_sriov (pdev , num_vfs );
90- if (err ) {
91- dev_warn (& pdev -> dev , "enable sriov failed %d\n" , err );
92- goto ex ;
118+ if (pci_num_vf (pdev )) {
119+ mlx5_core_warn (dev , "Unable to enable pci sriov, already enabled\n" );
120+ return - EBUSY ;
93121 }
94122
95- return 0 ;
123+ err = pci_enable_sriov (pdev , num_vfs );
124+ if (err )
125+ mlx5_core_warn (dev , "pci_enable_sriov failed : %d\n" , err );
96126
97- ex :
98- disable_vfs (dev , num_vfs );
99127 return err ;
100128}
101129
/* PCI-level counterpart of mlx5_pci_enable_sriov(): thin wrapper kept
 * for symmetry with the enable path.
 */
static void mlx5_pci_disable_sriov(struct pci_dev *pdev)
{
	pci_disable_sriov(pdev);
}
134+
135+ static int mlx5_sriov_enable (struct pci_dev * pdev , int num_vfs )
103136{
104137 struct mlx5_core_dev * dev = pci_get_drvdata (pdev );
105138 struct mlx5_core_sriov * sriov = & dev -> priv .sriov ;
106- int err ;
139+ int err = 0 ;
107140
108- kfree (sriov -> vfs_ctx );
109- sriov -> vfs_ctx = kcalloc (num_vfs , sizeof (* sriov -> vfs_ctx ), GFP_ATOMIC );
110- if (!sriov -> vfs_ctx )
111- return - ENOMEM ;
141+ err = mlx5_device_enable_sriov (dev , num_vfs );
142+ if (err ) {
143+ mlx5_core_warn (dev , "mlx5_device_enable_sriov failed : %d\n" , err );
144+ return err ;
145+ }
112146
113- sriov -> enabled_vfs = num_vfs ;
114- err = mlx5_core_create_vfs (pdev , num_vfs );
147+ err = mlx5_pci_enable_sriov (pdev , num_vfs );
115148 if (err ) {
116- kfree ( sriov -> vfs_ctx );
117- sriov -> vfs_ctx = NULL ;
149+ mlx5_core_warn ( dev , "mlx5_pci_enable_sriov failed : %d\n" , err );
150+ mlx5_device_disable_sriov ( dev ) ;
118151 return err ;
119152 }
120153
154+ sriov -> num_vfs = num_vfs ;
155+
121156 return 0 ;
122157}
123158
124- static void mlx5_core_init_vfs (struct mlx5_core_dev * dev , int num_vfs )
159+ static void mlx5_sriov_disable (struct pci_dev * pdev )
125160{
161+ struct mlx5_core_dev * dev = pci_get_drvdata (pdev );
126162 struct mlx5_core_sriov * sriov = & dev -> priv .sriov ;
127163
128- sriov -> num_vfs = num_vfs ;
129- }
130-
131- static void mlx5_core_cleanup_vfs (struct mlx5_core_dev * dev )
132- {
133- struct mlx5_core_sriov * sriov ;
134-
135- sriov = & dev -> priv .sriov ;
136- disable_vfs (dev , sriov -> num_vfs );
137-
138- if (mlx5_wait_for_vf_pages (dev ))
139- mlx5_core_warn (dev , "timeout claiming VFs pages\n" );
140-
164+ mlx5_pci_disable_sriov (pdev );
165+ mlx5_device_disable_sriov (dev );
141166 sriov -> num_vfs = 0 ;
142167}
143168
144169int mlx5_core_sriov_configure (struct pci_dev * pdev , int num_vfs )
145170{
146171 struct mlx5_core_dev * dev = pci_get_drvdata (pdev );
147- struct mlx5_core_sriov * sriov = & dev -> priv .sriov ;
148- int err ;
172+ int err = 0 ;
149173
150174 mlx5_core_dbg (dev , "requested num_vfs %d\n" , num_vfs );
151175 if (!mlx5_core_is_pf (dev ))
@@ -156,92 +180,44 @@ int mlx5_core_sriov_configure(struct pci_dev *pdev, int num_vfs)
156180 return - EINVAL ;
157181 }
158182
159- mlx5_core_cleanup_vfs (dev );
160-
161- if (!num_vfs ) {
162- #ifdef CONFIG_MLX5_CORE_EN
163- mlx5_eswitch_disable_sriov (dev -> priv .eswitch );
164- #endif
165- kfree (sriov -> vfs_ctx );
166- sriov -> vfs_ctx = NULL ;
167- if (!pci_vfs_assigned (pdev ))
168- pci_disable_sriov (pdev );
169- else
170- mlx5_core_info (dev , "unloading PF driver while leaving orphan VFs\n" );
171- return 0 ;
172- }
173-
174- err = mlx5_core_sriov_enable (pdev , num_vfs );
175- if (err ) {
176- mlx5_core_warn (dev , "mlx5_core_sriov_enable failed %d\n" , err );
177- return err ;
178- }
183+ if (num_vfs )
184+ err = mlx5_sriov_enable (pdev , num_vfs );
185+ else
186+ mlx5_sriov_disable (pdev );
179187
180- mlx5_core_init_vfs (dev , num_vfs );
181- #ifdef CONFIG_MLX5_CORE_EN
182- mlx5_eswitch_enable_sriov (dev -> priv .eswitch , num_vfs , SRIOV_LEGACY );
183- #endif
184-
185- return num_vfs ;
186- }
187-
188- static int sync_required (struct pci_dev * pdev )
189- {
190- struct mlx5_core_dev * dev = pci_get_drvdata (pdev );
191- struct mlx5_core_sriov * sriov = & dev -> priv .sriov ;
192- int cur_vfs = pci_num_vf (pdev );
193-
194- if (cur_vfs != sriov -> num_vfs ) {
195- mlx5_core_warn (dev , "current VFs %d, registered %d - sync needed\n" ,
196- cur_vfs , sriov -> num_vfs );
197- return 1 ;
198- }
199-
200- return 0 ;
188+ return err ? err : num_vfs ;
201189}
202190
203191int mlx5_sriov_init (struct mlx5_core_dev * dev )
204192{
205193 struct mlx5_core_sriov * sriov = & dev -> priv .sriov ;
206194 struct pci_dev * pdev = dev -> pdev ;
207- int cur_vfs ;
195+ int total_vfs ;
208196
209197 if (!mlx5_core_is_pf (dev ))
210198 return 0 ;
211199
212- if (!sync_required (dev -> pdev ))
213- return 0 ;
214-
215- cur_vfs = pci_num_vf (pdev );
216- sriov -> vfs_ctx = kcalloc (cur_vfs , sizeof (* sriov -> vfs_ctx ), GFP_KERNEL );
200+ total_vfs = pci_sriov_get_totalvfs (pdev );
201+ sriov -> num_vfs = pci_num_vf (pdev );
202+ sriov -> vfs_ctx = kcalloc (total_vfs , sizeof (* sriov -> vfs_ctx ), GFP_KERNEL );
217203 if (!sriov -> vfs_ctx )
218204 return - ENOMEM ;
219205
220- sriov -> enabled_vfs = cur_vfs ;
221-
222- mlx5_core_init_vfs (dev , cur_vfs );
223- #ifdef CONFIG_MLX5_CORE_EN
224- if (cur_vfs )
225- mlx5_eswitch_enable_sriov (dev -> priv .eswitch , cur_vfs ,
226- SRIOV_LEGACY );
227- #endif
228-
229- enable_vfs (dev , cur_vfs );
206+ /* If sriov VFs exist in PCI level, enable them in device level */
207+ if (!sriov -> num_vfs )
208+ return 0 ;
230209
210+ mlx5_device_enable_sriov (dev , sriov -> num_vfs );
231211 return 0 ;
232212}
233213
234- int mlx5_sriov_cleanup (struct mlx5_core_dev * dev )
214+ void mlx5_sriov_cleanup (struct mlx5_core_dev * dev )
235215{
236- struct pci_dev * pdev = dev -> pdev ;
237- int err ;
216+ struct mlx5_core_sriov * sriov = & dev -> priv .sriov ;
238217
239218 if (!mlx5_core_is_pf (dev ))
240- return 0 ;
219+ return ;
241220
242- err = mlx5_core_sriov_configure (pdev , 0 );
243- if (err )
244- return err ;
245-
246- return 0 ;
221+ mlx5_device_disable_sriov (dev );
222+ kfree (sriov -> vfs_ctx );
247223}
0 commit comments