diff --git include/internal/ktls.h include/internal/ktls.h
index 95492fd065..3c82cae26b 100644
--- include/internal/ktls.h
+++ include/internal/ktls.h
@@ -40,6 +40,11 @@
# define OPENSSL_KTLS_AES_GCM_128
# define OPENSSL_KTLS_AES_GCM_256
# define OPENSSL_KTLS_TLS13
+# ifdef TLS_CHACHA20_IV_LEN
+# ifndef OPENSSL_NO_CHACHA
+# define OPENSSL_KTLS_CHACHA20_POLY1305
+# endif
+# endif
typedef struct tls_enable ktls_crypto_info_t;
diff --git ssl/ktls.c ssl/ktls.c
index 79d980959e..e343d382cc 100644
--- ssl/ktls.c
+++ ssl/ktls.c
@@ -10,6 +10,67 @@
#include "ssl_local.h"
#include "internal/ktls.h"
+#ifndef OPENSSL_NO_KTLS_RX
+ /*
+ * Count the number of fully formed records that have been read into the
+ * record layer buffer but not yet processed.
+ *
+ * This function assumes that only complete records are present in the
+ * buffer. If read_ahead is enabled, that assumption may not hold and this
+ * function will fail.
+ */
+static int count_unprocessed_records(SSL *s)
+{
+ SSL3_BUFFER *rbuf = RECORD_LAYER_get_rbuf(&s->rlayer);
+ PACKET pkt, subpkt;
+ int count = 0;
+
+ if (!PACKET_buf_init(&pkt, rbuf->buf + rbuf->offset, rbuf->left))
+ return -1;
+
+ while (PACKET_remaining(&pkt) > 0) {
+ /* Skip record type and version */
+ if (!PACKET_forward(&pkt, 3))
+ return -1;
+
+ /* Read until next record */
+ if (!PACKET_get_length_prefixed_2(&pkt, &subpkt))
+ return -1;
+
+ count += 1;
+ }
+
+ return count;
+}
+
+/*
+ * The kernel cannot offload receive if a partial TLS record has been read.
+ * Check the read buffer for unprocessed records. If the buffer contains a
+ * partial record, fail and return 0. Otherwise, advance the sequence
+ * number in rec_seq by the number of unprocessed records and return 1.
+ */
+static int check_rx_read_ahead(SSL *s, unsigned char *rec_seq)
+{
+ int bit, count_unprocessed;
+
+ count_unprocessed = count_unprocessed_records(s);
+ if (count_unprocessed < 0)
+ return 0;
+
+ /* increment the crypto_info record sequence */
+ while (count_unprocessed) {
+ for (bit = 7; bit >= 0; bit--) { /* increment */
+ ++rec_seq[bit];
+ if (rec_seq[bit] != 0)
+ break;
+ }
+ count_unprocessed--;
+
+ }
+
+ return 1;
+}
+#endif
+
#if defined(__FreeBSD__)
# include "crypto/cryptodev.h"
@@ -37,6 +98,10 @@ int ktls_check_supported_cipher(const SSL *s, const EVP_CIPHER *c,
case SSL_AES128GCM:
case SSL_AES256GCM:
return 1;
+# ifdef OPENSSL_KTLS_CHACHA20_POLY1305
+ case SSL_CHACHA20POLY1305:
+ return 1;
+# endif
case SSL_AES128:
case SSL_AES256:
if (s->ext.use_etm)
@@ -55,9 +120,9 @@ int ktls_check_supported_cipher(const SSL *s, const EVP_CIPHER *c,
}
/* Function to configure kernel TLS structure */
-int ktls_configure_crypto(const SSL *s, const EVP_CIPHER *c, EVP_CIPHER_CTX *dd,
+int ktls_configure_crypto(SSL *s, const EVP_CIPHER *c, EVP_CIPHER_CTX *dd,
void *rl_sequence, ktls_crypto_info_t *crypto_info,
- unsigned char **rec_seq, unsigned char *iv,
+ int is_tx, unsigned char *iv,
unsigned char *key, unsigned char *mac_key,
size_t mac_secret_size)
{
@@ -71,6 +136,12 @@ int ktls_configure_crypto(const SSL *s, const EVP_CIPHER *c, EVP_CIPHER_CTX *dd,
else
crypto_info->iv_len = EVP_GCM_TLS_FIXED_IV_LEN;
break;
+# ifdef OPENSSL_KTLS_CHACHA20_POLY1305
+ case SSL_CHACHA20POLY1305:
+ crypto_info->cipher_algorithm = CRYPTO_CHACHA20_POLY1305;
+ crypto_info->iv_len = EVP_CIPHER_CTX_get_iv_length(dd);
+ break;
+# endif
case SSL_AES128:
case SSL_AES256:
switch (s->s3.tmp.new_cipher->algorithm_mac) {
@@ -101,11 +172,11 @@ int ktls_configure_crypto(const SSL *s, const EVP_CIPHER *c, EVP_CIPHER_CTX *dd,
crypto_info->tls_vminor = (s->version & 0x000000ff);
# ifdef TCP_RXTLS_ENABLE
memcpy(crypto_info->rec_seq, rl_sequence, sizeof(crypto_info->rec_seq));
- if (rec_seq != NULL)
- *rec_seq = crypto_info->rec_seq;
+ if (!is_tx && !check_rx_read_ahead(s, crypto_info->rec_seq))
+ return 0;
# else
- if (rec_seq != NULL)
- *rec_seq = NULL;
+ if (!is_tx)
+ return 0;
# endif
return 1;
};
@@ -154,15 +225,20 @@ int ktls_check_supported_cipher(const SSL *s, const EVP_CIPHER *c,
}
/* Function to configure kernel TLS structure */
-int ktls_configure_crypto(const SSL *s, const EVP_CIPHER *c, EVP_CIPHER_CTX *dd,
+int ktls_configure_crypto(SSL *s, const EVP_CIPHER *c, EVP_CIPHER_CTX *dd,
void *rl_sequence, ktls_crypto_info_t *crypto_info,
- unsigned char **rec_seq, unsigned char *iv,
+ int is_tx, unsigned char *iv,
unsigned char *key, unsigned char *mac_key,
size_t mac_secret_size)
{
unsigned char geniv[12];
unsigned char *iiv = iv;
+# ifdef OPENSSL_NO_KTLS_RX
+ if (!is_tx)
+ return 0;
+# endif
+
if (s->version == TLS1_2_VERSION &&
EVP_CIPHER_get_mode(c) == EVP_CIPH_GCM_MODE) {
if (!EVP_CIPHER_CTX_get_updated_iv(dd, geniv,
@@ -186,8 +262,8 @@ int ktls_configure_crypto(const SSL *s, const EVP_CIPHER *c, EVP_CIPHER_CTX *dd,
memcpy(crypto_info->gcm128.key, key, EVP_CIPHER_get_key_length(c));
memcpy(crypto_info->gcm128.rec_seq, rl_sequence,
TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE);
- if (rec_seq != NULL)
- *rec_seq = crypto_info->gcm128.rec_seq;
+ if (!is_tx && !check_rx_read_ahead(s, crypto_info->gcm128.rec_seq))
+ return 0;
return 1;
# endif
# ifdef OPENSSL_KTLS_AES_GCM_256
@@ -201,8 +277,8 @@ int ktls_configure_crypto(const SSL *s, const EVP_CIPHER *c, EVP_CIPHER_CTX *dd,
memcpy(crypto_info->gcm256.key, key, EVP_CIPHER_get_key_length(c));
memcpy(crypto_info->gcm256.rec_seq, rl_sequence,
TLS_CIPHER_AES_GCM_256_REC_SEQ_SIZE);
- if (rec_seq != NULL)
- *rec_seq = crypto_info->gcm256.rec_seq;
+ if (!is_tx && !check_rx_read_ahead(s, crypto_info->gcm256.rec_seq))
+ return 0;
return 1;
# endif
# ifdef OPENSSL_KTLS_AES_CCM_128
@@ -216,8 +292,8 @@ int ktls_configure_crypto(const SSL *s, const EVP_CIPHER *c, EVP_CIPHER_CTX *dd,
memcpy(crypto_info->ccm128.key, key, EVP_CIPHER_get_key_length(c));
memcpy(crypto_info->ccm128.rec_seq, rl_sequence,
TLS_CIPHER_AES_CCM_128_REC_SEQ_SIZE);
- if (rec_seq != NULL)
- *rec_seq = crypto_info->ccm128.rec_seq;
+ if (!is_tx && !check_rx_read_ahead(s, crypto_info->ccm128.rec_seq))
+ return 0;
return 1;
# endif
# ifdef OPENSSL_KTLS_CHACHA20_POLY1305
@@ -231,8 +307,10 @@ int ktls_configure_crypto(const SSL *s, const EVP_CIPHER *c, EVP_CIPHER_CTX *dd,
EVP_CIPHER_get_key_length(c));
memcpy(crypto_info->chacha20poly1305.rec_seq, rl_sequence,
TLS_CIPHER_CHACHA20_POLY1305_REC_SEQ_SIZE);
- if (rec_seq != NULL)
- *rec_seq = crypto_info->chacha20poly1305.rec_seq;
+ if (!is_tx
+ && !check_rx_read_ahead(s,
+ crypto_info->chacha20poly1305.rec_seq))
+ return 0;
return 1;
# endif
default:
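
The receive path added above has to account for records that user space has already buffered but not yet decrypted: check_rx_read_ahead() treats the crypto_info record sequence as an 8-byte big-endian counter and advances it once per such record, so the kernel starts with the nonce of the first record it will actually see. A minimal standalone sketch of that counter arithmetic follows; the helper name and the small test driver are illustrative, not part of the patch.

    #include <stdio.h>

    /* Advance an 8-byte big-endian TLS record sequence number by one. */
    static void rec_seq_increment(unsigned char rec_seq[8])
    {
        int i;

        for (i = 7; i >= 0; i--) {
            if (++rec_seq[i] != 0)
                break;          /* no carry into the next byte */
        }
    }

    int main(void)
    {
        unsigned char seq[8] = { 0, 0, 0, 0, 0, 0, 0, 0xff };
        int unprocessed = 2;    /* e.g. two records still buffered */
        int i;

        while (unprocessed--)
            rec_seq_increment(seq);

        for (i = 0; i < 8; i++)
            printf("%02x", seq[i]);     /* prints 0000000000000101 */
        printf("\n");
        return 0;
    }
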
diff --git ssl/record/ssl3_record.c ssl/record/ssl3_record.c
index d8ef018741..63caac080f 100644
--- ssl/record/ssl3_record.c
+++ ssl/record/ssl3_record.c
@@ -185,18 +185,23 @@ int ssl3_get_record(SSL *s)
int imac_size;
size_t num_recs = 0, max_recs, j;
PACKET pkt, sslv2pkt;
- int is_ktls_left;
+ int using_ktls;
SSL_MAC_BUF *macbufs = NULL;
int ret = -1;
rr = RECORD_LAYER_get_rrec(&s->rlayer);
rbuf = RECORD_LAYER_get_rbuf(&s->rlayer);
- is_ktls_left = (SSL3_BUFFER_get_left(rbuf) > 0);
max_recs = s->max_pipelines;
if (max_recs == 0)
max_recs = 1;
sess = s->session;
+ /*
+ * KTLS reads full records. Any data still left in the buffer
+ * was read before KTLS was enabled.
+ */
+ using_ktls = BIO_get_ktls_recv(s->rbio) && SSL3_BUFFER_get_left(rbuf) == 0;
+
do {
thisrr = &rr[num_recs];
@@ -361,7 +366,9 @@ int ssl3_get_record(SSL *s)
}
}
- if (SSL_IS_TLS13(s) && s->enc_read_ctx != NULL) {
+ if (SSL_IS_TLS13(s)
+ && s->enc_read_ctx != NULL
+ && !using_ktls) {
if (thisrr->type != SSL3_RT_APPLICATION_DATA
&& (thisrr->type != SSL3_RT_CHANGE_CIPHER_SPEC
|| !SSL_IS_FIRST_HANDSHAKE(s))
@@ -391,7 +398,13 @@ int ssl3_get_record(SSL *s)
}
if (SSL_IS_TLS13(s)) {
- if (thisrr->length > SSL3_RT_MAX_TLS13_ENCRYPTED_LENGTH) {
+ size_t len = SSL3_RT_MAX_TLS13_ENCRYPTED_LENGTH;
+
+ /* KTLS strips the inner record type. */
+ if (using_ktls)
+ len = SSL3_RT_MAX_ENCRYPTED_LENGTH;
+
+ if (thisrr->length > len) {
SSLfatal(s, SSL_AD_RECORD_OVERFLOW,
SSL_R_ENCRYPTED_LENGTH_TOO_LONG);
return -1;
@@ -409,7 +422,7 @@ int ssl3_get_record(SSL *s)
#endif
/* KTLS may use all of the buffer */
- if (BIO_get_ktls_recv(s->rbio) && !is_ktls_left)
+ if (using_ktls)
len = SSL3_BUFFER_get_left(rbuf);
if (thisrr->length > len) {
@@ -518,11 +531,7 @@ int ssl3_get_record(SSL *s)
return 1;
}
- /*
- * KTLS reads full records. If there is any data left,
- * then it is from before enabling ktls
- */
- if (BIO_get_ktls_recv(s->rbio) && !is_ktls_left)
+ if (using_ktls)
goto skip_decryption;
if (s->read_hash != NULL) {
@@ -677,21 +686,29 @@ int ssl3_get_record(SSL *s)
if (SSL_IS_TLS13(s)
&& s->enc_read_ctx != NULL
&& thisrr->type != SSL3_RT_ALERT) {
- size_t end;
+ /*
+ * The following logic is irrelevant with KTLS: the kernel delivers
+ * unprotected records, so the record type already represents the actual
+ * content type, the padding has been removed, and thisrr->type and
+ * thisrr->length hold the correct values.
+ */
+ if (!using_ktls) {
+ size_t end;
- if (thisrr->length == 0
- || thisrr->type != SSL3_RT_APPLICATION_DATA) {
- SSLfatal(s, SSL_AD_UNEXPECTED_MESSAGE, SSL_R_BAD_RECORD_TYPE);
- goto end;
+ if (thisrr->length == 0
+ || thisrr->type != SSL3_RT_APPLICATION_DATA) {
+ SSLfatal(s, SSL_AD_UNEXPECTED_MESSAGE, SSL_R_BAD_RECORD_TYPE);
+ goto end;
+ }
+
+ /* Strip trailing padding */
+ for (end = thisrr->length - 1; end > 0 && thisrr->data[end] == 0;
+ end--)
+ continue;
+
+ thisrr->length = end;
+ thisrr->type = thisrr->data[end];
}
-
- /* Strip trailing padding */
- for (end = thisrr->length - 1; end > 0 && thisrr->data[end] == 0;
- end--)
- continue;
-
- thisrr->length = end;
- thisrr->type = thisrr->data[end];
if (thisrr->type != SSL3_RT_APPLICATION_DATA
&& thisrr->type != SSL3_RT_ALERT
&& thisrr->type != SSL3_RT_HANDSHAKE) {
@@ -700,7 +717,7 @@ int ssl3_get_record(SSL *s)
}
if (s->msg_callback)
s->msg_callback(0, s->version, SSL3_RT_INNER_CONTENT_TYPE,
- &thisrr->data[end], 1, s, s->msg_callback_arg);
+ &thisrr->type, 1, s, s->msg_callback_arg);
}
/*
@@ -723,8 +740,7 @@ int ssl3_get_record(SSL *s)
* Therefore we have to rely on KTLS to check the plaintext length
* limit in the kernel.
*/
- if (thisrr->length > SSL3_RT_MAX_PLAIN_LENGTH
- && (!BIO_get_ktls_recv(s->rbio) || is_ktls_left)) {
+ if (thisrr->length > SSL3_RT_MAX_PLAIN_LENGTH && !using_ktls) {
SSLfatal(s, SSL_AD_RECORD_OVERFLOW, SSL_R_DATA_LENGTH_TOO_LONG);
goto end;
}
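
The !using_ktls branch kept above is the software implementation of TLS 1.3 record unpadding from RFC 8446: trailing zero octets are stripped and the last non-zero octet becomes the real content type, which is exactly the work the kernel has already done when KTLS receive is active. A minimal sketch of that unpadding step, using a hypothetical helper rather than the patch's in-place loop:

    #include <stddef.h>

    /*
     * A TLS 1.3 plaintext is: content || content-type || zero padding.
     * Strip the padding, shrink *len to the content length and return the
     * inner content type, or -1 if the record is all zeros (malformed).
     */
    int tls13_strip_padding(const unsigned char *data, size_t *len)
    {
        size_t end = *len;

        while (end > 0 && data[end - 1] == 0)
            end--;                  /* skip the zero padding */

        if (end == 0)
            return -1;              /* no content type byte present */

        *len = end - 1;             /* length without the type byte */
        return data[end - 1];       /* e.g. 23 for application_data */
    }
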
diff --git ssl/ssl_local.h ssl/ssl_local.h
index 5471e900b8..79ced2f468 100644
--- ssl/ssl_local.h
+++ ssl/ssl_local.h
@@ -2760,9 +2760,9 @@ __owur int ssl_log_secret(SSL *ssl, const char *label,
/* ktls.c */
int ktls_check_supported_cipher(const SSL *s, const EVP_CIPHER *c,
const EVP_CIPHER_CTX *dd);
-int ktls_configure_crypto(const SSL *s, const EVP_CIPHER *c, EVP_CIPHER_CTX *dd,
+int ktls_configure_crypto(SSL *s, const EVP_CIPHER *c, EVP_CIPHER_CTX *dd,
void *rl_sequence, ktls_crypto_info_t *crypto_info,
- unsigned char **rec_seq, unsigned char *iv,
+ int is_tx, unsigned char *iv,
unsigned char *key, unsigned char *mac_key,
size_t mac_secret_size);
# endif
diff --git ssl/t1_enc.c ssl/t1_enc.c
index 237a19cd93..900ba14fbd 100644
--- ssl/t1_enc.c
+++ ssl/t1_enc.c
@@ -98,42 +98,6 @@ static int tls1_generate_key_block(SSL *s, unsigned char *km, size_t num)
return ret;
}
-#ifndef OPENSSL_NO_KTLS
- /*
- * Count the number of records that were not processed yet from record boundary.
- *
- * This function assumes that there are only fully formed records read in the
- * record layer. If read_ahead is enabled, then this might be false and this
- * function will fail.
- */
-# ifndef OPENSSL_NO_KTLS_RX
-static int count_unprocessed_records(SSL *s)
-{
- SSL3_BUFFER *rbuf = RECORD_LAYER_get_rbuf(&s->rlayer);
- PACKET pkt, subpkt;
- int count = 0;
-
- if (!PACKET_buf_init(&pkt, rbuf->buf + rbuf->offset, rbuf->left))
- return -1;
-
- while (PACKET_remaining(&pkt) > 0) {
- /* Skip record type and version */
- if (!PACKET_forward(&pkt, 3))
- return -1;
-
- /* Read until next record */
- if (!PACKET_get_length_prefixed_2(&pkt, &subpkt))
- return -1;
-
- count += 1;
- }
-
- return count;
-}
-# endif
-#endif
-
-
int tls_provider_set_tls_params(SSL *s, EVP_CIPHER_CTX *ctx,
const EVP_CIPHER *ciph,
const EVP_MD *md)
@@ -201,12 +165,7 @@ int tls1_change_cipher_state(SSL *s, int which)
int reuse_dd = 0;
#ifndef OPENSSL_NO_KTLS
ktls_crypto_info_t crypto_info;
- unsigned char *rec_seq;
void *rl_sequence;
-# ifndef OPENSSL_NO_KTLS_RX
- int count_unprocessed;
- int bit;
-# endif
BIO *bio;
#endif
@@ -473,30 +432,11 @@ int tls1_change_cipher_state(SSL *s, int which)
else
rl_sequence = RECORD_LAYER_get_read_sequence(&s->rlayer);
- if (!ktls_configure_crypto(s, c, dd, rl_sequence, &crypto_info, &rec_seq,
- iv, key, ms, *mac_secret_size))
+ if (!ktls_configure_crypto(s, c, dd, rl_sequence, &crypto_info,
+ which & SSL3_CC_WRITE, iv, key, ms,
+ *mac_secret_size))
goto skip_ktls;
- if (which & SSL3_CC_READ) {
-# ifndef OPENSSL_NO_KTLS_RX
- count_unprocessed = count_unprocessed_records(s);
- if (count_unprocessed < 0)
- goto skip_ktls;
-
- /* increment the crypto_info record sequence */
- while (count_unprocessed) {
- for (bit = 7; bit >= 0; bit--) { /* increment */
- ++rec_seq[bit];
- if (rec_seq[bit] != 0)
- break;
- }
- count_unprocessed--;
- }
-# else
- goto skip_ktls;
-# endif
- }
-
/* ktls works with user provided buffers directly */
if (BIO_set_ktls(bio, &crypto_info, which & SSL3_CC_WRITE)) {
if (which & SSL3_CC_WRITE)
diff --git ssl/tls13_enc.c ssl/tls13_enc.c
index 12388922e3..eaab0e2a74 100644
--- ssl/tls13_enc.c
+++ ssl/tls13_enc.c
@@ -434,6 +434,7 @@ int tls13_change_cipher_state(SSL *s, int which)
const EVP_CIPHER *cipher = NULL;
#if !defined(OPENSSL_NO_KTLS) && defined(OPENSSL_KTLS_TLS13)
ktls_crypto_info_t crypto_info;
+ void *rl_sequence;
BIO *bio;
#endif
@@ -688,8 +689,7 @@ int tls13_change_cipher_state(SSL *s, int which)
s->statem.enc_write_state = ENC_WRITE_STATE_VALID;
#ifndef OPENSSL_NO_KTLS
# if defined(OPENSSL_KTLS_TLS13)
- if (!(which & SSL3_CC_WRITE)
- || !(which & SSL3_CC_APPLICATION)
+ if (!(which & SSL3_CC_APPLICATION)
|| (s->options & SSL_OP_ENABLE_KTLS) == 0)
goto skip_ktls;
@@ -705,7 +705,10 @@ int tls13_change_cipher_state(SSL *s, int which)
if (!ktls_check_supported_cipher(s, cipher, ciph_ctx))
goto skip_ktls;
- bio = s->wbio;
+ if (which & SSL3_CC_WRITE)
+ bio = s->wbio;
+ else
+ bio = s->rbio;
if (!ossl_assert(bio != NULL)) {
SSLfatal(s, SSL_AD_INTERNAL_ERROR, ERR_R_INTERNAL_ERROR);
@@ -713,18 +716,26 @@ int tls13_change_cipher_state(SSL *s, int which)
}
/* All future data will get encrypted by ktls. Flush the BIO or skip ktls */
- if (BIO_flush(bio) <= 0)
- goto skip_ktls;
+ if (which & SSL3_CC_WRITE) {
+ if (BIO_flush(bio) <= 0)
+ goto skip_ktls;
+ }
/* configure kernel crypto structure */
- if (!ktls_configure_crypto(s, cipher, ciph_ctx,
- RECORD_LAYER_get_write_sequence(&s->rlayer),
- &crypto_info, NULL, iv, key, NULL, 0))
+ if (which & SSL3_CC_WRITE)
+ rl_sequence = RECORD_LAYER_get_write_sequence(&s->rlayer);
+ else
+ rl_sequence = RECORD_LAYER_get_read_sequence(&s->rlayer);
+
+ if (!ktls_configure_crypto(s, cipher, ciph_ctx, rl_sequence, &crypto_info,
+ which & SSL3_CC_WRITE, iv, key, NULL, 0))
goto skip_ktls;
/* ktls works with user provided buffers directly */
- if (BIO_set_ktls(bio, &crypto_info, which & SSL3_CC_WRITE))
- ssl3_release_write_buffer(s);
+ if (BIO_set_ktls(bio, &crypto_info, which & SSL3_CC_WRITE)) {
+ if (which & SSL3_CC_WRITE)
+ ssl3_release_write_buffer(s);
+ }
skip_ktls:
# endif
#endif
diff --git test/sslapitest.c test/sslapitest.c
index 2911d6e94b..faf2eec2bc 100644
--- test/sslapitest.c
+++ test/sslapitest.c
@@ -1243,7 +1243,7 @@ static int execute_test_ktls(int cis_ktls, int sis_ktls,
#if defined(OPENSSL_NO_KTLS_RX)
rx_supported = 0;
#else
- rx_supported = (tls_version != TLS1_3_VERSION);
+ rx_supported = 1;
#endif
if (!cis_ktls || !rx_supported) {
if (!TEST_false(BIO_get_ktls_recv(clientssl->rbio)))
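
From an application's point of view nothing changes beyond the existing opt-in: with this patch, setting SSL_OP_ENABLE_KTLS before the handshake can now result in receive offload for TLS 1.3 as well, and the existing BIO queries report which directions the kernel took over. A brief usage sketch; the report_ktls() wrapper is illustrative, and connection setup and error handling are omitted.

    #include <stdio.h>
    #include <openssl/ssl.h>

    /* Before SSL_connect()/SSL_accept(): opt in to kernel TLS. */
    /*     SSL_set_options(ssl, SSL_OP_ENABLE_KTLS);            */

    /*
     * After the handshake, check whether the kernel is handling each
     * direction. OpenSSL silently falls back to user-space record
     * processing when KTLS cannot be enabled, so no error handling is
     * needed here.
     */
    static void report_ktls(const SSL *ssl)
    {
        if (BIO_get_ktls_send(SSL_get_wbio(ssl)))
            fprintf(stderr, "KTLS TX enabled\n");
        if (BIO_get_ktls_recv(SSL_get_rbio(ssl)))
            fprintf(stderr, "KTLS RX enabled\n");
    }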