@@ -121,7 +121,14 @@ static bool mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry)
 	if (x->xso.type == XFRM_DEV_OFFLOAD_CRYPTO)
 		esn_msb = xfrm_replay_seqhi(x, htonl(seq_bottom));
 
-	sa_entry->esn_state.esn = esn;
+	if (sa_entry->esn_state.esn_msb)
+		sa_entry->esn_state.esn = esn;
+	else
+		/* According to RFC4303, section "3.3.3. Sequence Number Generation",
+		 * the first packet sent using a given SA will contain a sequence
+		 * number of 1.
+		 */
+		sa_entry->esn_state.esn = max_t(u32, esn, 1);
 	sa_entry->esn_state.esn_msb = esn_msb;
 
 	if (unlikely(overlap && seq_bottom < MLX5E_IPSEC_ESN_SCOPE_MID)) {
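For context, the new branch encodes the RFC 4303 rule that the first packet sent on an SA carries sequence number 1, so a fresh SA (ESN high bits still zero) must never be programmed with a starting value of 0. Below is a minimal user-space sketch of the same clamping rule; the helper name initial_esn is hypothetical and not part of the driver.

#include <assert.h>
#include <stdint.h>

/* Illustration only: when the upper 32 bits of the ESN are still zero, the
 * low 32 bits are clamped to at least 1, since RFC 4303 says the first
 * packet sent on an SA carries sequence number 1.
 */
static uint32_t initial_esn(uint32_t esn, uint32_t esn_msb)
{
	if (esn_msb)
		return esn;               /* high word already set: keep value */
	return esn > 1 ? esn : 1;         /* fresh SA: never start below 1 */
}

int main(void)
{
	assert(initial_esn(0, 0) == 1);   /* brand new SA is programmed as 1 */
	assert(initial_esn(5, 0) == 5);   /* SA that already advanced keeps 5 */
	assert(initial_esn(0, 1) == 0);   /* after a 32-bit wrap, 0 is valid */
	return 0;
}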
@@ -335,6 +342,27 @@ void mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
 		attrs->replay_esn.esn = sa_entry->esn_state.esn;
 		attrs->replay_esn.esn_msb = sa_entry->esn_state.esn_msb;
 		attrs->replay_esn.overlap = sa_entry->esn_state.overlap;
+		switch (x->replay_esn->replay_window) {
+		case 32:
+			attrs->replay_esn.replay_window =
+				MLX5_IPSEC_ASO_REPLAY_WIN_32BIT;
+			break;
+		case 64:
+			attrs->replay_esn.replay_window =
+				MLX5_IPSEC_ASO_REPLAY_WIN_64BIT;
+			break;
+		case 128:
+			attrs->replay_esn.replay_window =
+				MLX5_IPSEC_ASO_REPLAY_WIN_128BIT;
+			break;
+		case 256:
+			attrs->replay_esn.replay_window =
+				MLX5_IPSEC_ASO_REPLAY_WIN_256BIT;
+			break;
+		default:
+			WARN_ON(true);
+			return;
+		}
 	}
 
 	attrs->dir = x->xso.dir;
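The switch above maps the replay window size requested through xfrm (x->replay_esn->replay_window) onto the fixed set of window encodings the hardware ASO object supports; any other size is a driver-level inconsistency and trips the WARN_ON. A standalone sketch of the same closed mapping, using placeholder enum names rather than the real MLX5_IPSEC_ASO_REPLAY_WIN_* values:

#include <stdio.h>

/* Illustration only: the hardware object accepts exactly four window sizes,
 * so the mapping is a closed switch.  The enum below is a placeholder for
 * the real MLX5_IPSEC_ASO_REPLAY_WIN_* constants.
 */
enum replay_win {
	REPLAY_WIN_32BIT,
	REPLAY_WIN_64BIT,
	REPLAY_WIN_128BIT,
	REPLAY_WIN_256BIT,
	REPLAY_WIN_UNSUPPORTED,
};

static enum replay_win encode_replay_window(unsigned int bits)
{
	switch (bits) {
	case 32:
		return REPLAY_WIN_32BIT;
	case 64:
		return REPLAY_WIN_64BIT;
	case 128:
		return REPLAY_WIN_128BIT;
	case 256:
		return REPLAY_WIN_256BIT;
	default:
		return REPLAY_WIN_UNSUPPORTED;  /* the driver WARNs and bails out here */
	}
}

int main(void)
{
	printf("%d\n", encode_replay_window(128)); /* prints 2 */
	printf("%d\n", encode_replay_window(48));  /* prints 4: unsupported */
	return 0;
}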
@@ -907,9 +935,11 @@ void mlx5e_ipsec_cleanup(struct mlx5e_priv *priv)
 		return;
 
 	mlx5e_accel_ipsec_fs_cleanup(ipsec);
-	if (mlx5_ipsec_device_caps(priv->mdev) & MLX5_IPSEC_CAP_TUNNEL)
+	if (ipsec->netevent_nb.notifier_call) {
 		unregister_netevent_notifier(&ipsec->netevent_nb);
-	if (mlx5_ipsec_device_caps(priv->mdev) & MLX5_IPSEC_CAP_PACKET_OFFLOAD)
+		ipsec->netevent_nb.notifier_call = NULL;
+	}
+	if (ipsec->aso)
 		mlx5e_ipsec_aso_cleanup(ipsec);
 	destroy_workqueue(ipsec->wq);
 	kfree(ipsec);
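The cleanup path now keys teardown on what was actually initialized (a registered notifier, an allocated ASO context) instead of re-deriving that from device capabilities, which keeps it correct even after a partial init. A small user-space sketch of that idiom; the struct and field names are hypothetical and only mirror the shape of the change.

#include <stdlib.h>

/* Illustration only: release resources based on what was actually set up,
 * not on what the device advertises.  Mirrors the notifier_call / aso
 * checks in the hunk above.
 */
struct ipsec_like {
	void *notifier; /* non-NULL only after successful registration */
	void *aso;      /* non-NULL only after the ASO context was created */
};

static void ipsec_like_cleanup(struct ipsec_like *ctx)
{
	if (!ctx)
		return;
	if (ctx->notifier) {
		free(ctx->notifier);
		ctx->notifier = NULL;   /* mirrors clearing notifier_call */
	}
	if (ctx->aso)
		free(ctx->aso);
	free(ctx);
}

int main(void)
{
	struct ipsec_like *ctx = calloc(1, sizeof(*ctx));

	ipsec_like_cleanup(ctx);        /* safe even though nothing was initialized */
	return 0;
}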
@@ -1018,6 +1048,12 @@ static int mlx5e_xfrm_validate_policy(struct mlx5_core_dev *mdev,
 		}
 	}
 
+	if (x->xdo.type == XFRM_DEV_OFFLOAD_PACKET &&
+	    !(mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_PACKET_OFFLOAD)) {
+		NL_SET_ERR_MSG_MOD(extack, "Packet offload is not supported");
+		return -EINVAL;
+	}
+
 	return 0;
 }
 
@@ -1113,14 +1149,6 @@ static const struct xfrmdev_ops mlx5e_ipsec_xfrmdev_ops = {
 	.xdo_dev_state_free = mlx5e_xfrm_free_state,
 	.xdo_dev_offload_ok = mlx5e_ipsec_offload_ok,
 	.xdo_dev_state_advance_esn = mlx5e_xfrm_advance_esn_state,
-};
-
-static const struct xfrmdev_ops mlx5e_ipsec_packet_xfrmdev_ops = {
-	.xdo_dev_state_add = mlx5e_xfrm_add_state,
-	.xdo_dev_state_delete = mlx5e_xfrm_del_state,
-	.xdo_dev_state_free = mlx5e_xfrm_free_state,
-	.xdo_dev_offload_ok = mlx5e_ipsec_offload_ok,
-	.xdo_dev_state_advance_esn = mlx5e_xfrm_advance_esn_state,
 
 	.xdo_dev_state_update_curlft = mlx5e_xfrm_update_curlft,
 	.xdo_dev_policy_add = mlx5e_xfrm_add_policy,
@@ -1138,11 +1166,7 @@ void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv)
 
 	mlx5_core_info(mdev, "mlx5e: IPSec ESP acceleration enabled\n");
 
-	if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_PACKET_OFFLOAD)
-		netdev->xfrmdev_ops = &mlx5e_ipsec_packet_xfrmdev_ops;
-	else
-		netdev->xfrmdev_ops = &mlx5e_ipsec_xfrmdev_ops;
-
+	netdev->xfrmdev_ops = &mlx5e_ipsec_xfrmdev_ops;
 	netdev->features |= NETIF_F_HW_ESP;
 	netdev->hw_enc_features |= NETIF_F_HW_ESP;
 
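Taken together, the last three hunks replace the two xfrmdev_ops tables (crypto-only vs. packet-capable) with a single table that is always registered, and move the packet-offload capability check into the validation path, where it can fail the request with an extack message. A minimal sketch of that pattern follows; all names are hypothetical, and only the error string is taken from the hunk above.

#include <stdbool.h>
#include <stdio.h>

/* Illustration only: one operations table is always installed, and the
 * capability gate moves into the per-request validation path.
 */
struct fake_dev {
	bool has_packet_offload;
};

static int validate_policy(const struct fake_dev *dev, bool packet_offload)
{
	if (packet_offload && !dev->has_packet_offload) {
		fprintf(stderr, "Packet offload is not supported\n");
		return -1;      /* stands in for -EINVAL plus the extack message */
	}
	return 0;
}

int main(void)
{
	struct fake_dev crypto_only = { .has_packet_offload = false };

	printf("%d\n", validate_policy(&crypto_only, false)); /* 0: crypto mode still works */
	printf("%d\n", validate_policy(&crypto_only, true));  /* -1: packet mode rejected */
	return 0;
}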