Skip to content

Commit d9aaed8

Browse files
Artemy-Mellanox authored and davem330 committed
{net,IB}/mlx5: Refactor page fault handling
* Update page fault event according to last specification. * Separate code path for page fault EQ, completion EQ and async EQ. * Move page fault handling work queue from mlx5_ib static variable into mlx5_core page fault EQ. * Allocate memory to store ODP event dynamically as the events arrive, since in atomic context - use mempool. * Make mlx5_ib page fault handler run in process context. Signed-off-by: Artemy Kovalyov <[email protected]> Signed-off-by: Leon Romanovsky <[email protected]> Signed-off-by: Saeed Mahameed <[email protected]> Signed-off-by: David S. Miller <[email protected]>
1 parent 223cdc7 commit d9aaed8

File tree

12 files changed

+522
-468
lines changed

12 files changed

+522
-468
lines changed

drivers/infiniband/hw/mlx5/main.c

Lines changed: 3 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -3319,6 +3319,9 @@ static struct mlx5_interface mlx5_ib_interface = {
33193319
.add = mlx5_ib_add,
33203320
.remove = mlx5_ib_remove,
33213321
.event = mlx5_ib_event,
3322+
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
3323+
.pfault = mlx5_ib_pfault,
3324+
#endif
33223325
.protocol = MLX5_INTERFACE_PROTOCOL_IB,
33233326
};
33243327

@@ -3329,25 +3332,14 @@ static int __init mlx5_ib_init(void)
33293332
if (deprecated_prof_sel != 2)
33303333
pr_warn("prof_sel is deprecated for mlx5_ib, set it for mlx5_core\n");
33313334

3332-
err = mlx5_ib_odp_init();
3333-
if (err)
3334-
return err;
3335-
33363335
err = mlx5_register_interface(&mlx5_ib_interface);
3337-
if (err)
3338-
goto clean_odp;
3339-
3340-
return err;
33413336

3342-
clean_odp:
3343-
mlx5_ib_odp_cleanup();
33443337
return err;
33453338
}
33463339

33473340
static void __exit mlx5_ib_cleanup(void)
33483341
{
33493342
mlx5_unregister_interface(&mlx5_ib_interface);
3350-
mlx5_ib_odp_cleanup();
33513343
}
33523344

33533345
module_init(mlx5_ib_init);

drivers/infiniband/hw/mlx5/mlx5_ib.h

Lines changed: 2 additions & 47 deletions
Original file line numberDiff line numberDiff line change
@@ -277,29 +277,6 @@ struct mlx5_ib_rwq_ind_table {
277277
u32 rqtn;
278278
};
279279

280-
/*
281-
* Connect-IB can trigger up to four concurrent pagefaults
282-
* per-QP.
283-
*/
284-
enum mlx5_ib_pagefault_context {
285-
MLX5_IB_PAGEFAULT_RESPONDER_READ,
286-
MLX5_IB_PAGEFAULT_REQUESTOR_READ,
287-
MLX5_IB_PAGEFAULT_RESPONDER_WRITE,
288-
MLX5_IB_PAGEFAULT_REQUESTOR_WRITE,
289-
MLX5_IB_PAGEFAULT_CONTEXTS
290-
};
291-
292-
static inline enum mlx5_ib_pagefault_context
293-
mlx5_ib_get_pagefault_context(struct mlx5_pagefault *pagefault)
294-
{
295-
return pagefault->flags & (MLX5_PFAULT_REQUESTOR | MLX5_PFAULT_WRITE);
296-
}
297-
298-
struct mlx5_ib_pfault {
299-
struct work_struct work;
300-
struct mlx5_pagefault mpfault;
301-
};
302-
303280
struct mlx5_ib_ubuffer {
304281
struct ib_umem *umem;
305282
int buf_size;
@@ -385,20 +362,6 @@ struct mlx5_ib_qp {
385362
/* Store signature errors */
386363
bool signature_en;
387364

388-
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
389-
/*
390-
* A flag that is true for QP's that are in a state that doesn't
391-
* allow page faults, and shouldn't schedule any more faults.
392-
*/
393-
int disable_page_faults;
394-
/*
395-
* The disable_page_faults_lock protects a QP's disable_page_faults
396-
* field, allowing for a thread to atomically check whether the QP
397-
* allows page faults, and if so schedule a page fault.
398-
*/
399-
spinlock_t disable_page_faults_lock;
400-
struct mlx5_ib_pfault pagefaults[MLX5_IB_PAGEFAULT_CONTEXTS];
401-
#endif
402365
struct list_head qps_list;
403366
struct list_head cq_recv_list;
404367
struct list_head cq_send_list;
@@ -869,18 +832,13 @@ struct ib_rwq_ind_table *mlx5_ib_create_rwq_ind_table(struct ib_device *device,
869832
int mlx5_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);
870833

871834
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
872-
extern struct workqueue_struct *mlx5_ib_page_fault_wq;
873-
874835
void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev);
875-
void mlx5_ib_mr_pfault_handler(struct mlx5_ib_qp *qp,
876-
struct mlx5_ib_pfault *pfault);
877-
void mlx5_ib_odp_create_qp(struct mlx5_ib_qp *qp);
836+
void mlx5_ib_pfault(struct mlx5_core_dev *mdev, void *context,
837+
struct mlx5_pagefault *pfault);
878838
int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev);
879839
void mlx5_ib_odp_remove_one(struct mlx5_ib_dev *ibdev);
880840
int __init mlx5_ib_odp_init(void);
881841
void mlx5_ib_odp_cleanup(void);
882-
void mlx5_ib_qp_disable_pagefaults(struct mlx5_ib_qp *qp);
883-
void mlx5_ib_qp_enable_pagefaults(struct mlx5_ib_qp *qp);
884842
void mlx5_ib_invalidate_range(struct ib_umem *umem, unsigned long start,
885843
unsigned long end);
886844
#else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
@@ -889,13 +847,10 @@ static inline void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
889847
return;
890848
}
891849

892-
static inline void mlx5_ib_odp_create_qp(struct mlx5_ib_qp *qp) {}
893850
static inline int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev) { return 0; }
894851
static inline void mlx5_ib_odp_remove_one(struct mlx5_ib_dev *ibdev) {}
895852
static inline int mlx5_ib_odp_init(void) { return 0; }
896853
static inline void mlx5_ib_odp_cleanup(void) {}
897-
static inline void mlx5_ib_qp_disable_pagefaults(struct mlx5_ib_qp *qp) {}
898-
static inline void mlx5_ib_qp_enable_pagefaults(struct mlx5_ib_qp *qp) {}
899854

900855
#endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
901856

0 commit comments

Comments (0)