-
Notifications
You must be signed in to change notification settings - Fork 6
/
Copy pathcryptolib.cc
1080 lines (945 loc) · 38.4 KB
/
cryptolib.cc
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
// Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "cryptohome/cryptolib.h"
#include <limits>
#include <utility>
#include <vector>
#include <malloc.h>
#include <openssl/err.h>
#include <openssl/evp.h>
#include <openssl/hmac.h>
#include <openssl/rand.h>
#include <openssl/sha.h>
#include <unistd.h>
#include <base/files/file_util.h>
#include <base/logging.h>
#include <brillo/secure_blob.h>
#include <crypto/libcrypto-compat.h>
#include <crypto/scoped_openssl_types.h>
extern "C" {
#include <scrypt/crypto_scrypt.h>
#include <scrypt/scryptenc.h>
}
#include "cryptohome/platform.h"
using brillo::SecureBlob;
namespace {
template <class T, class U>
T Sha1Helper(const U& data) {
SHA_CTX sha_context;
unsigned char md_value[SHA_DIGEST_LENGTH];
T hash;
SHA1_Init(&sha_context);
SHA1_Update(&sha_context, data.data(), data.size());
SHA1_Final(md_value, &sha_context);
hash.resize(sizeof(md_value));
memcpy(hash.data(), md_value, sizeof(md_value));
// Zero the stack to match expectations set by SecureBlob.
brillo::SecureMemset(md_value, 0, sizeof(md_value));
return hash;
}
template <class T, class U>
T Sha256Helper(const U& data) {
SHA256_CTX sha_context;
unsigned char md_value[SHA256_DIGEST_LENGTH];
T hash;
SHA256_Init(&sha_context);
SHA256_Update(&sha_context, data.data(), data.size());
SHA256_Final(md_value, &sha_context);
hash.resize(sizeof(md_value));
memcpy(hash.data(), md_value, sizeof(md_value));
// Zero the stack to match expectations set by SecureBlob.
brillo::SecureMemset(md_value, 0, sizeof(md_value));
return hash;
}
template <class T>
brillo::SecureBlob HmacSha512Helper(const brillo::SecureBlob& key,
const T& data) {
const int kSha512OutputSize = 64;
unsigned char mac[kSha512OutputSize];
HMAC(EVP_sha512(), key.data(), key.size(), data.data(), data.size(), mac,
NULL);
return brillo::SecureBlob(std::begin(mac), std::end(mac));
}
template <class T>
brillo::SecureBlob HmacSha256Helper(const brillo::SecureBlob& key,
const T& data) {
const int kSha256OutputSize = 32;
unsigned char mac[kSha256OutputSize];
HMAC(EVP_sha256(), key.data(), key.size(), data.data(), data.size(), mac,
NULL);
return brillo::SecureBlob(std::begin(mac), std::end(mac));
}
template <class T>
void BlobToHexToBufferHelper(const T& data,
void* buffer,
size_t buffer_length) {
static const char table[] = "0123456789abcdef";
char* char_buffer = reinterpret_cast<char*>(buffer);
char* char_buffer_end = char_buffer + buffer_length;
for (uint8_t byte : data) {
if (char_buffer == char_buffer_end)
break;
*char_buffer++ = table[(byte >> 4) & 0x0f];
if (char_buffer == char_buffer_end)
break;
*char_buffer++ = table[byte & 0x0f];
}
if (char_buffer != char_buffer_end)
*char_buffer = '\x00';
}
} // namespace
namespace cryptohome {
// The well-known exponent used when generating RSA keys. Cryptohome only
// generates one RSA key, which is the system-wide cryptohome key. This is the
// common public exponent (F4 = 2^16 + 1).
const unsigned int kWellKnownExponent = 65537;
// The current number of hash rounds we use. Large enough to be a measurable
// amount of time, but not add too much overhead to login (around 10ms).
const unsigned int kDefaultPasswordRounds = 1337;
// AES block size in bytes.
const unsigned int kAesBlockSize = 16;
// The size of the AES-GCM IV (96 bits = 12 bytes, the GCM-recommended IV
// size).
constexpr unsigned int kAesGcmIVSize = 96 / (sizeof(uint8_t) * CHAR_BIT);
// The size of an AES-GCM key in cryptohome code (256 bits = 32 bytes).
constexpr unsigned int kAesGcm256KeySize = 256 / (sizeof(uint8_t) * CHAR_BIT);
// The size of the AES-GCM authentication tag, in bytes.
constexpr unsigned int kAesGcmTagSize = 16;
// AES key size in bytes (256-bit). This key size is used for all key creation,
// though we currently only use 128 bits for the eCryptfs File Encryption Key
// (FEK). Larger than 128-bit has too great of a CPU overhead on unaccelerated
// architectures.
constexpr unsigned int kDefaultAesKeySize = 32;
// The number of hash rounds we originally used when converting a password to a
// key. This is used when converting older cryptohome vault keysets.
const unsigned int kDefaultLegacyPasswordRounds = 1;
// The maximum number of times to try decryption with the TPM.
constexpr int kTpmDecryptMaxRetries = 2;
// The size in bytes of password blob to be generated by SCrypt. Should be the
// same size as the modulus of cryptohome key, since we need to be able to
// decrypt it.
constexpr unsigned int kDefaultPassBlobSize = 256;
void CryptoLib::GetSecureRandom(unsigned char* buf, size_t length) {
// OpenSSL takes a signed integer. On the off chance that the user requests
// something too large, truncate it.
//
// TODO(b/143445674): correctly handle the 2 corner cases: 1) length exceeds
// limit and 2) RAND_bytes() fails and make relevant changes.
if (length > static_cast<size_t>(std::numeric_limits<int>::max())) {
length = std::numeric_limits<int>::max();
}
RAND_bytes(buf, length);
}
// Returns a SecureBlob of |length| bytes filled from the CSPRNG.
SecureBlob CryptoLib::CreateSecureRandomBlob(size_t length) {
  SecureBlob random_bytes(length);
  GetSecureRandom(reinterpret_cast<unsigned char*>(random_bytes.data()),
                  random_bytes.size());
  return random_bytes;
}
// Generates a fresh |key_bits|-bit RSA key using the well-known public
// exponent and returns the public modulus in |n| and the first secret prime
// factor in |p|. Returns false on allocation or generation failure; |n| and
// |p| are only modified on success.
bool CryptoLib::CreateRsaKey(size_t key_bits, SecureBlob* n, SecureBlob* p) {
  crypto::ScopedRSA rsa(RSA_new());
  crypto::ScopedBIGNUM e(BN_new());
  if (!rsa || !e) {
    LOG(ERROR) << "Failed to allocate RSA or BIGNUM.";
    return false;
  }
  if (!BN_set_word(e.get(), kWellKnownExponent) ||
      !RSA_generate_key_ex(rsa.get(), key_bits, e.get(), nullptr)) {
    LOG(ERROR) << "RSA key generation failed.";
    return false;
  }
  // NOTE(review): BN_bn2bin writes BN_num_bytes(rsa_n) bytes, which can be
  // fewer than RSA_size() if the modulus has leading zero bytes, leaving the
  // tail of |local_n| zero-filled — confirm callers tolerate this.
  SecureBlob local_n(RSA_size(rsa.get()));
  const BIGNUM* rsa_n;
  RSA_get0_key(rsa.get(), &rsa_n, nullptr, nullptr);
  if (BN_bn2bin(rsa_n, local_n.data()) <= 0) {
    LOG(ERROR) << "Unable to get modulus from RSA key.";
    return false;
  }
  const BIGNUM* rsa_p;
  RSA_get0_factors(rsa.get(), &rsa_p, nullptr);
  SecureBlob local_p(BN_num_bytes(rsa_p));
  if (BN_bn2bin(rsa_p, local_p.data()) <= 0) {
    LOG(ERROR) << "Unable to get private key from RSA key.";
    return false;
  }
  // Publish the outputs only after both conversions succeeded.
  n->swap(local_n);
  p->swap(local_p);
  return true;
}
// Reconstructs the full RSA private key from one secret prime factor
// |secret_prime| of the public modulus already stored in |rsa|, and installs
// the private components (p, q, d, dmp1, dmq1, iqmp) into |rsa|.
// Returns false if |secret_prime| does not divide the modulus or on any
// allocation/math failure. On failure after the RSA_set0_* calls begin, |rsa|
// may be partially updated.
bool CryptoLib::FillRsaPrivateKeyFromSecretPrime(
    const SecureBlob& secret_prime, RSA* rsa) {
  crypto::ScopedOpenSSL<BN_CTX, BN_CTX_free> bn_context(BN_CTX_new());
  if (!bn_context) {
    LOG(ERROR) << "Failed to allocate BN_CTX structure";
    return false;
  }
  // Load the first prime from the parameter.
  crypto::ScopedBIGNUM p(BN_new()), q(BN_new()), remainder(BN_new());
  if (!p || !q || !remainder) {
    LOG(ERROR) << "Failed to allocate BIGNUM structure";
    return false;
  }
  if (!BN_bin2bn(secret_prime.data(), secret_prime.size(), p.get())) {
    LOG(ERROR) << "Failed to construct secret prime from binary blob";
    return false;
  }
  // Calculate the second prime by dividing the public modulus: q = n / p.
  const BIGNUM* rsa_n;
  const BIGNUM* rsa_e;
  RSA_get0_key(rsa, &rsa_n, &rsa_e, nullptr);
  if (!BN_div(q.get(), remainder.get(), rsa_n, p.get(), bn_context.get())) {
    LOG(ERROR) << "Failed to divide public modulus";
    return false;
  }
  // A nonzero remainder means the supplied prime does not belong to this key.
  if (!BN_is_zero(remainder.get())) {
    LOG(ERROR) << "Bad secret prime: does not divide the modulus evenly";
    return false;
  }
  // Calculate the private exponent: d = e^-1 mod (p-1)(q-1).
  crypto::ScopedBIGNUM d(BN_new());
  crypto::ScopedBIGNUM decremented_p(BN_new());
  crypto::ScopedBIGNUM decremented_q(BN_new());
  crypto::ScopedBIGNUM totient(BN_new());
  if (!d || !decremented_p || !decremented_q || !totient) {
    LOG(ERROR) << "Failed to allocate BIGNUM structure";
    return false;
  }
  if (!BN_sub(decremented_p.get(), p.get(), BN_value_one()) ||
      !BN_sub(decremented_q.get(), q.get(), BN_value_one()) ||
      !BN_mul(totient.get(), decremented_p.get(), decremented_q.get(),
              bn_context.get())) {
    LOG(ERROR) << "Failed to calculate totient function";
    return false;
  }
  if (!BN_mod_inverse(d.get(), rsa_e, totient.get(), bn_context.get())) {
    LOG(ERROR) << "Failed to calculate modular inverse";
    return false;
  }
  // Calculate the CRT parameters: dmp1 = d mod (p-1), dmq1 = d mod (q-1).
  crypto::ScopedBIGNUM dmp1(BN_new()), dmq1(BN_new()), iqmp(BN_new());
  if (!dmp1 || !dmq1 || !iqmp) {
    LOG(ERROR) << "Failed to allocate BIGNUM structure";
    return false;
  }
  if (!BN_mod(dmp1.get(), d.get(), decremented_p.get(), bn_context.get()) ||
      !BN_mod(dmq1.get(), d.get(), decremented_q.get(), bn_context.get())) {
    LOG(ERROR) << "Failed to calculate the private exponent over the modulo";
    return false;
  }
  // Calculate the inverse of the second prime modulo the first prime:
  // iqmp = q^-1 mod p.
  if (!BN_mod_inverse(iqmp.get(), q.get(), p.get(), bn_context.get())) {
    LOG(ERROR) << "Failed to calculate the inverse of the prime module the "
                  "other prime";
    return false;
  }
  // All checks pass, now assign fields. RSA_set0_* take ownership of the
  // BIGNUMs, hence the release() calls.
  if (!RSA_set0_factors(rsa, p.release(), q.release()) ||
      !RSA_set0_key(rsa, nullptr, nullptr, d.release()) ||
      !RSA_set0_crt_params(rsa, dmp1.release(), dmq1.release(),
                           iqmp.release())) {
    LOG(ERROR) << "Failed to set RSA parameters.";
    return false;
  }
  return true;
}
brillo::Blob CryptoLib::Sha1(const brillo::Blob& data) {
  // SHA-1 digest of |data|, returned in a regular (non-wiping) blob.
  brillo::Blob digest = Sha1Helper<brillo::Blob, brillo::Blob>(data);
  return digest;
}
brillo::SecureBlob CryptoLib::Sha1ToSecureBlob(const brillo::Blob& data) {
  // SHA-1 digest of a plain blob, returned in wiped-on-free storage.
  brillo::SecureBlob digest =
      Sha1Helper<brillo::SecureBlob, brillo::Blob>(data);
  return digest;
}
brillo::SecureBlob CryptoLib::Sha1(const brillo::SecureBlob& data) {
  // SHA-1 of sensitive data; both input and output use secure storage.
  brillo::SecureBlob digest =
      Sha1Helper<brillo::SecureBlob, brillo::SecureBlob>(data);
  return digest;
}
brillo::Blob CryptoLib::Sha256(const brillo::Blob& data) {
  // SHA-256 digest of |data|, returned in a regular (non-wiping) blob.
  brillo::Blob digest = Sha256Helper<brillo::Blob, brillo::Blob>(data);
  return digest;
}
brillo::SecureBlob CryptoLib::Sha256ToSecureBlob(const brillo::Blob& data) {
  // SHA-256 digest of a plain blob, returned in wiped-on-free storage.
  brillo::SecureBlob digest =
      Sha256Helper<brillo::SecureBlob, brillo::Blob>(data);
  return digest;
}
brillo::SecureBlob CryptoLib::Sha256(const brillo::SecureBlob& data) {
  // SHA-256 of sensitive data; both input and output use secure storage.
  brillo::SecureBlob digest =
      Sha256Helper<brillo::SecureBlob, brillo::SecureBlob>(data);
  return digest;
}
brillo::SecureBlob CryptoLib::HmacSha512(const brillo::SecureBlob& key,
                                         const brillo::Blob& data) {
  // HMAC-SHA512 of a plain blob under |key|.
  brillo::SecureBlob mac = HmacSha512Helper(key, data);
  return mac;
}
brillo::SecureBlob CryptoLib::HmacSha512(const brillo::SecureBlob& key,
                                         const brillo::SecureBlob& data) {
  // HMAC-SHA512 of sensitive data under |key|.
  brillo::SecureBlob mac = HmacSha512Helper(key, data);
  return mac;
}
brillo::SecureBlob CryptoLib::HmacSha256(const brillo::SecureBlob& key,
                                         const brillo::Blob& data) {
  // HMAC-SHA256 of a plain blob under |key|.
  brillo::SecureBlob mac = HmacSha256Helper(key, data);
  return mac;
}
brillo::SecureBlob CryptoLib::HmacSha256(const brillo::SecureBlob& key,
                                         const brillo::SecureBlob& data) {
  // HMAC-SHA256 of sensitive data under |key|.
  brillo::SecureBlob mac = HmacSha256Helper(key, data);
  return mac;
}
size_t CryptoLib::GetAesBlockSize() {
return EVP_CIPHER_block_size(EVP_aes_256_cbc());
}
// Derives an AES-256-CBC key (and, optionally, IV) from |passkey| via
// OpenSSL's EVP_BytesToKey with SHA-1, |salt| and |rounds| iterations.
// |iv| may be null if the caller only needs the key. Returns false on a bad
// salt size or derivation failure.
bool CryptoLib::PasskeyToAesKey(const brillo::SecureBlob& passkey,
                                const brillo::SecureBlob& salt,
                                unsigned int rounds,
                                SecureBlob* key, SecureBlob* iv) {
  if (salt.size() != PKCS5_SALT_LEN) {
    LOG(ERROR) << "Bad salt size.";
    return false;
  }
  const EVP_CIPHER* cipher = EVP_aes_256_cbc();
  SecureBlob derived_key(EVP_CIPHER_key_length(cipher));
  SecureBlob derived_iv(EVP_CIPHER_iv_length(cipher));
  // Stretch the passkey into key material sized for the cipher.
  if (!EVP_BytesToKey(cipher, EVP_sha1(), salt.data(), passkey.data(),
                      passkey.size(), rounds, derived_key.data(),
                      derived_iv.data())) {
    LOG(ERROR) << "Failure converting bytes to key";
    return false;
  }
  key->swap(derived_key);
  if (iv) {
    iv->swap(derived_iv);
  }
  return true;
}
// Decrypts AES-256-GCM |ciphertext| using |key| and |iv|, authenticating the
// result against |tag|. Returns true only if decryption succeeds AND the tag
// verifies; on a false return the contents of |plaintext| are unspecified and
// must not be used. Key/IV/tag sizes are fixed by the scheme and CHECKed.
bool CryptoLib::AesGcmDecrypt(const brillo::SecureBlob& ciphertext,
                              const brillo::SecureBlob& tag,
                              const brillo::SecureBlob& key,
                              const brillo::SecureBlob& iv,
                              brillo::SecureBlob* plaintext) {
  CHECK_EQ(key.size(), kAesGcm256KeySize);
  CHECK_EQ(iv.size(), kAesGcmIVSize);
  CHECK_EQ(tag.size(), kAesGcmTagSize);
  crypto::ScopedEVP_CIPHER_CTX ctx(EVP_CIPHER_CTX_new());
  if (ctx.get() == nullptr) {
    LOG(ERROR) << "Failed to create cipher ctx.";
    return false;
  }
  if (EVP_DecryptInit_ex(ctx.get(), EVP_aes_256_gcm(), nullptr, nullptr,
                         nullptr) != 1) {
    LOG(ERROR) << "Failed to init decrypt.";
    return false;
  }
  // The IV length must be configured before the key and IV are installed.
  if (EVP_CIPHER_CTX_ctrl(ctx.get(), EVP_CTRL_GCM_SET_IVLEN, kAesGcmIVSize,
                          nullptr) != 1) {
    LOG(ERROR) << "Failed to set iv size.";
    return false;
  }
  if (EVP_DecryptInit_ex(ctx.get(), nullptr, nullptr, key.data(), iv.data()) !=
      1) {
    LOG(ERROR) << "Failed to add key and iv to decrypt operation.";
    return false;
  }
  // GCM is a stream mode: the plaintext is the same length as the ciphertext.
  plaintext->resize(ciphertext.size());
  int output_size = 0;
  if (EVP_DecryptUpdate(ctx.get(), plaintext->data(), &output_size,
                        ciphertext.data(), ciphertext.size()) != 1) {
    LOG(ERROR) << "Failed to decrypt the plaintext.";
    return false;
  }
  if (output_size != ciphertext.size()) {
    LOG(ERROR) << "Failed to process entire ciphertext.";
    return false;
  }
  // EVP_CTRL_GCM_SET_TAG takes a non-const pointer, but the tag is only read
  // here — hence the const_cast.
  uint8_t* tag_ptr = const_cast<uint8_t*>(tag.data());
  if (EVP_CIPHER_CTX_ctrl(ctx.get(), EVP_CTRL_GCM_SET_TAG, tag.size(),
                          tag_ptr) != 1) {
    LOG(ERROR) << "Failed to set the tag.";
    return false;
  }
  output_size = 0;
  // DecryptFinal performs the actual tag comparison; a non-positive return
  // value means authentication failed.
  int ret_val = EVP_DecryptFinal_ex(ctx.get(), nullptr, &output_size);
  return output_size == 0 && ret_val > 0;
}
// Encrypts |plaintext| with AES-256-GCM under |key|. A fresh random 96-bit
// IV is generated into |iv| on every call; the authentication tag is returned
// in |tag| and the ciphertext (same length as the plaintext) in |ciphertext|.
bool CryptoLib::AesGcmEncrypt(const brillo::SecureBlob& plaintext,
                              const brillo::SecureBlob& key,
                              brillo::SecureBlob* iv,
                              brillo::SecureBlob* tag,
                              brillo::SecureBlob* ciphertext) {
  CHECK_EQ(key.size(), kAesGcm256KeySize);
  // Never reuse an IV with GCM: generate a fresh random one per call.
  iv->resize(kAesGcmIVSize);
  GetSecureRandom(iv->data(), kAesGcmIVSize);
  crypto::ScopedEVP_CIPHER_CTX ctx(EVP_CIPHER_CTX_new());
  if (ctx.get() == nullptr) {
    LOG(ERROR) << "Failed to create context.";
    return false;
  }
  if (EVP_EncryptInit_ex(ctx.get(), EVP_aes_256_gcm(), nullptr, nullptr,
                         nullptr) != 1) {
    LOG(ERROR) << "Failed to init aes-gcm-256.";
    return false;
  }
  // The IV length must be configured before installing the key and IV.
  if (EVP_CIPHER_CTX_ctrl(ctx.get(), EVP_CTRL_GCM_SET_IVLEN, kAesGcmIVSize,
                          nullptr) != 1) {
    LOG(ERROR) << "Failed to set IV length.";
    return false;
  }
  if (EVP_EncryptInit_ex(ctx.get(), nullptr, nullptr, key.data(), iv->data()) !=
      1) {
    LOG(ERROR) << "Failed to init key and iv.";
    return false;
  }
  // GCM is a stream mode: ciphertext length equals plaintext length.
  ciphertext->resize(plaintext.size());
  int processed_bytes = 0;
  if (EVP_EncryptUpdate(ctx.get(), ciphertext->data(), &processed_bytes,
                        plaintext.data(), plaintext.size()) != 1) {
    LOG(ERROR) << "Failed to encrypt plaintext.";
    return false;
  }
  if (plaintext.size() != processed_bytes) {
    LOG(ERROR) << "Did not process the entire plaintext.";
    return false;
  }
  int unused_output_length;
  if (EVP_EncryptFinal_ex(ctx.get(), nullptr, &unused_output_length) != 1) {
    LOG(ERROR) << "Failed to finalize encryption.";
    return false;
  }
  // The tag is only available after finalization.
  tag->resize(kAesGcmTagSize);
  if (EVP_CIPHER_CTX_ctrl(ctx.get(), EVP_CTRL_GCM_GET_TAG, kAesGcmTagSize,
                          tag->data()) != 1) {
    LOG(ERROR) << "Failed to retrieve tag.";
    return false;
  }
  return true;
}
bool CryptoLib::AesEncrypt(const SecureBlob& plaintext,
                           const SecureBlob& key,
                           const SecureBlob& iv,
                           SecureBlob* ciphertext) {
  // Whole-buffer AES-256-CBC with the cryptohome default padding scheme
  // (PKCS plus an embedded SHA-1 digest of the plaintext).
  const bool ok =
      AesEncryptSpecifyBlockMode(plaintext, 0, plaintext.size(), key, iv,
                                 kPaddingCryptohomeDefault, kCbc, ciphertext);
  return ok;
}
bool CryptoLib::AesDecrypt(const SecureBlob& ciphertext,
                           const SecureBlob& key,
                           const SecureBlob& iv,
                           SecureBlob* plaintext) {
  // Whole-buffer AES-256-CBC with the cryptohome default padding scheme
  // (the embedded SHA-1 digest is verified and stripped on success).
  const bool ok =
      AesDecryptSpecifyBlockMode(ciphertext, 0, ciphertext.size(), key, iv,
                                 kPaddingCryptohomeDefault, kCbc, plaintext);
  return ok;
}
// This is the reverse operation of AesEncryptSpecifyBlockMode above. See that
// method for a description of how padding and block_mode affect the crypto
// operations. This method automatically removes and verifies the padding, so
// plain_text (on success) will contain the original data.
//
// Note that a call to AesDecryptSpecifyBlockMode needs to have the same padding
// and block_mode as the corresponding encrypt call. Changing the block mode
// will drastically alter the decryption. And an incorrect PaddingScheme will
// result in the padding verification failing, for which the method call fails,
// even if the key and initialization vector were correct.
bool CryptoLib::AesDecryptSpecifyBlockMode(const SecureBlob& encrypted,
unsigned int start,
unsigned int count,
const SecureBlob& key,
const SecureBlob& iv,
PaddingScheme padding,
BlockMode block_mode,
SecureBlob* plain_text) {
if ((start > encrypted.size()) ||
((start + count) > encrypted.size()) ||
((start + count) < start)) {
return false;
}
SecureBlob local_plain_text(count);
if (local_plain_text.size() >
static_cast<unsigned int>(std::numeric_limits<int>::max())) {
// EVP_DecryptUpdate takes a signed int
return false;
}
int final_size = 0;
int decrypt_size = local_plain_text.size();
const EVP_CIPHER* cipher;
switch (block_mode) {
case kCbc:
cipher = EVP_aes_256_cbc();
break;
case kEcb:
cipher = EVP_aes_256_ecb();
break;
default:
LOG(ERROR) << "Invalid block mode specified: " << block_mode;
return false;
}
if (key.size() != static_cast<unsigned int>(EVP_CIPHER_key_length(cipher))) {
LOG(ERROR) << "Invalid key length of " << key.size()
<< ", expected " << EVP_CIPHER_key_length(cipher);
return false;
}
// ECB ignores the IV, so only check the IV length if we are using a different
// block mode.
if ((block_mode != kEcb) &&
(iv.size() != static_cast<unsigned int>(EVP_CIPHER_iv_length(cipher)))) {
LOG(ERROR) << "Invalid iv length of " << iv.size()
<< ", expected " << EVP_CIPHER_iv_length(cipher);
return false;
}
crypto::ScopedEVP_CIPHER_CTX decryption_context(EVP_CIPHER_CTX_new());
if (!decryption_context) {
LOG(ERROR) << "Failed to allocate EVP_CIPHER_CTX";
return false;
}
EVP_DecryptInit_ex(decryption_context.get(), cipher, nullptr, key.data(),
iv.data());
if (padding == kPaddingNone) {
EVP_CIPHER_CTX_set_padding(decryption_context.get(), 0);
}
// Make sure we're not pointing into an empty buffer or past the end.
const unsigned char *encrypted_buf = NULL;
if (start < encrypted.size())
encrypted_buf = &encrypted[start];
if (!EVP_DecryptUpdate(decryption_context.get(), local_plain_text.data(),
&decrypt_size, encrypted_buf, count)) {
LOG(ERROR) << "DecryptUpdate failed";
return false;
}
// In the case of local_plain_text being full, we must avoid trying to
// point past the end of the buffer when calling EVP_DecryptFinal_ex().
unsigned char *final_buf = NULL;
if (static_cast<unsigned int>(decrypt_size) < local_plain_text.size())
final_buf = &local_plain_text[decrypt_size];
if (!EVP_DecryptFinal_ex(decryption_context.get(), final_buf, &final_size)) {
unsigned long err = ERR_get_error(); // NOLINT openssl types
ERR_load_ERR_strings();
ERR_load_crypto_strings();
LOG(ERROR) << "DecryptFinal Error: " << err
<< ": " << ERR_lib_error_string(err)
<< ", " << ERR_func_error_string(err)
<< ", " << ERR_reason_error_string(err);
return false;
}
final_size += decrypt_size;
if (padding == kPaddingCryptohomeDefault) {
if (final_size < SHA_DIGEST_LENGTH) {
LOG(ERROR) << "Plain text was too small.";
return false;
}
final_size -= SHA_DIGEST_LENGTH;
SHA_CTX sha_context;
unsigned char md_value[SHA_DIGEST_LENGTH];
SHA1_Init(&sha_context);
SHA1_Update(&sha_context, local_plain_text.data(), final_size);
SHA1_Final(md_value, &sha_context);
const unsigned char* md_ptr = local_plain_text.data();
md_ptr += final_size;
if (brillo::SecureMemcmp(md_ptr, md_value, SHA_DIGEST_LENGTH)) {
LOG(ERROR) << "Digest verification failed.";
return false;
}
}
local_plain_text.resize(final_size);
plain_text->swap(local_plain_text);
return true;
}
// AesEncryptSpecifyBlockMode encrypts the bytes in plain_text using AES,
// placing the output into encrypted. Aside from range constraints (start and
// count) and the key and initialization vector, this method has two parameters
// that control how the ciphertext is generated and are useful in encrypting
// specific types of data in cryptohome.
//
// First, padding specifies whether and how the plaintext is padded before
// encryption. The three options, described in the PaddingScheme enumeration
// are used as such:
// - kPaddingNone is used to mix the user's passkey (derived from the
// password) into the encrypted blob storing the vault keyset when the TPM
// is used. This is described in more detail in the README file. There is
// no padding in this case, and the size of plain_text needs to be a
// multiple of the AES block size (16 bytes).
// - kPaddingStandard uses standard PKCS padding, which is the default for
// OpenSSL.
// - kPaddingCryptohomeDefault appends a SHA1 hash of the plaintext in
// plain_text before passing it to OpenSSL, which still uses PKCS padding
// so that we do not have to re-implement block-multiple padding ourselves.
// This padding scheme allows us to strongly verify the plaintext on
// decryption, which is essential when, for example, test decrypting a nonce
// to test whether a password was correct (we do this in user_session.cc).
//
// The block mode switches between ECB and CBC. Generally, CBC is used for most
// AES crypto that we perform, since it is a better mode for us for data that is
// larger than the block size. We use ECB only when mixing the user passkey
// into the TPM-encrypted blob, since we only encrypt a single block of that
// data.
bool CryptoLib::AesEncryptSpecifyBlockMode(const SecureBlob& plain_text,
                                           unsigned int start,
                                           unsigned int count,
                                           const SecureBlob& key,
                                           const SecureBlob& iv,
                                           PaddingScheme padding,
                                           BlockMode block_mode,
                                           SecureBlob* encrypted) {
  // Verify that the range is within the data passed, including the case
  // where start + count wraps around.
  if ((start > plain_text.size()) ||
      ((start + count) > plain_text.size()) ||
      ((start + count) < start)) {
    return false;
  }
  if (count > static_cast<unsigned int>(std::numeric_limits<int>::max())) {
    // EVP_EncryptUpdate takes a signed int.
    return false;
  }
  // First set the output size based on the padding scheme. No padding means
  // that the input needs to be a multiple of the block size, and the output
  // size is equal to the input size. Standard padding means we should allocate
  // up to a full block additional for the PKCS padding. Cryptohome default
  // means we should allocate a full block additional for the PKCS padding and
  // enough for a SHA1 hash.
  unsigned int block_size = GetAesBlockSize();
  unsigned int needed_size = count;
  switch (padding) {
    case kPaddingCryptohomeDefault:
      // The AES block size and SHA digest length are not enough for this to
      // overflow, as needed_size is initialized to count, which must be <=
      // INT_MAX, but needed_size is itself an unsigned. The block size and
      // digest length are fixed by the algorithm.
      needed_size += block_size + SHA_DIGEST_LENGTH;
      break;
    case kPaddingStandard:
      needed_size += block_size;
      break;
    case kPaddingNone:
      if (count % block_size) {
        LOG(ERROR) << "Data size (" << count << ") was not a multiple "
                   << "of the block size (" << block_size << ")";
        return false;
      }
      break;
    default:
      LOG(ERROR) << "Invalid padding specified";
      return false;
  }
  SecureBlob cipher_text(needed_size);
  // Set the block mode.
  const EVP_CIPHER* cipher;
  switch (block_mode) {
    case kCbc:
      cipher = EVP_aes_256_cbc();
      break;
    case kEcb:
      cipher = EVP_aes_256_ecb();
      break;
    default:
      LOG(ERROR) << "Invalid block mode specified";
      return false;
  }
  if (key.size() != static_cast<unsigned int>(EVP_CIPHER_key_length(cipher))) {
    LOG(ERROR) << "Invalid key length of " << key.size()
               << ", expected " << EVP_CIPHER_key_length(cipher);
    return false;
  }
  // ECB ignores the IV, so only check the IV length if we are using a
  // different block mode.
  if ((block_mode != kEcb) &&
      (iv.size() != static_cast<unsigned int>(EVP_CIPHER_iv_length(cipher)))) {
    LOG(ERROR) << "Invalid iv length of " << iv.size()
               << ", expected " << EVP_CIPHER_iv_length(cipher);
    return false;
  }
  // Initialize the OpenSSL crypto context.
  crypto::ScopedEVP_CIPHER_CTX encryption_context(EVP_CIPHER_CTX_new());
  if (!encryption_context) {
    LOG(ERROR) << "Failed to allocate EVP_CIPHER_CTX";
    return false;
  }
  // BUGFIX: the return value of EVP_EncryptInit_ex was previously ignored,
  // so a failed initialization would silently yield garbage ciphertext.
  if (!EVP_EncryptInit_ex(encryption_context.get(), cipher, nullptr,
                          key.data(), iv.data())) {
    LOG(ERROR) << "Failed to initialize the encryption context";
    return false;
  }
  if (padding == kPaddingNone) {
    EVP_CIPHER_CTX_set_padding(encryption_context.get(), 0);
  }
  // First, encrypt the plain_text data.
  unsigned int current_size = 0;
  int encrypt_size = 0;
  // Make sure we're not pointing into an empty buffer or past the end.
  const unsigned char* plain_buf = NULL;
  if (start < plain_text.size())
    plain_buf = &plain_text[start];
  if (!EVP_EncryptUpdate(encryption_context.get(), &cipher_text[current_size],
                         &encrypt_size, plain_buf, count)) {
    LOG(ERROR) << "EncryptUpdate failed";
    return false;
  }
  current_size += encrypt_size;
  encrypt_size = 0;
  // Next, if the padding uses the cryptohome default scheme, encrypt a SHA1
  // hash of the preceding plain_text into the output data.
  if (padding == kPaddingCryptohomeDefault) {
    SHA_CTX sha_context;
    unsigned char md_value[SHA_DIGEST_LENGTH];
    SHA1_Init(&sha_context);
    // NOTE(review): with an empty plaintext this indexes &plain_text[start]
    // at the end of the buffer (count == 0); pre-existing behavior, kept
    // as-is — confirm no caller passes an empty range with this padding.
    SHA1_Update(&sha_context, &plain_text[start], count);
    SHA1_Final(md_value, &sha_context);
    if (!EVP_EncryptUpdate(encryption_context.get(), &cipher_text[current_size],
                           &encrypt_size, md_value, sizeof(md_value))) {
      LOG(ERROR) << "EncryptUpdate failed";
      return false;
    }
    current_size += encrypt_size;
    encrypt_size = 0;
  }
  // In the case of cipher_text being full, we must avoid trying to
  // point past the end of the buffer when calling EVP_EncryptFinal_ex().
  unsigned char* final_buf = NULL;
  if (static_cast<unsigned int>(current_size) < cipher_text.size())
    final_buf = &cipher_text[current_size];
  // Finally, finish the encryption.
  if (!EVP_EncryptFinal_ex(encryption_context.get(), final_buf,
                           &encrypt_size)) {
    LOG(ERROR) << "EncryptFinal failed";
    return false;
  }
  current_size += encrypt_size;
  // Trim to the actual ciphertext length; PKCS padding may use less than the
  // full extra block allocated above.
  cipher_text.resize(current_size);
  encrypted->swap(cipher_text);
  return true;
}
// Obscure (and Unobscure) RSA messages.
// Let k be a key derived from the user passphrase. On disk, we store
// m = ObscureRSAMessage(RSA-on-TPM(random-data), k). The reason for this
// function is the existence of an ambiguity in the TPM spec: the format of data
// returned by Tspi_Data_Bind is unspecified, so it's _possible_ (although does
// not happen in practice) that RSA-on-TPM(random-data) could start with some
// kind of ASN.1 header or whatever (some known data). If this was true, and we
// encrypted all of RSA-on-TPM(random-data), then one could test values of k by
// decrypting RSA-on-TPM(random-data) and looking for the known header, which
// would allow brute-forcing the user passphrase without talking to the TPM.
//
// Therefore, we instead encrypt _one block_ of RSA-on-TPM(random-data) with AES
// in ECB mode; we pick the last AES block, in the hope that that block will be
// part of the RSA message. TODO(ellyjones): why? if the TPM could add a header,
// it could also add a footer, and we'd be just as sunk.
//
// If we do encrypt part of the RSA message, the entirety of
// RSA-on-TPM(random-data) should be impossible to decrypt, without encrypting
// any known plaintext. This approach also requires brute-force attempts on k to
// go through the TPM, since there's no way to test a potential decryption
// without doing UnRSA-on-TPM() to see if the message is valid now.
bool CryptoLib::ObscureRSAMessage(const SecureBlob& plaintext,
const SecureBlob& key,
SecureBlob* ciphertext) {
unsigned int aes_block_size = GetAesBlockSize();
if (plaintext.size() < aes_block_size * 2) {
LOG(ERROR) << "Plaintext is too small.";
return false;
}
unsigned int offset = plaintext.size() - aes_block_size;
SecureBlob obscured_chunk;
if (!AesEncryptSpecifyBlockMode(plaintext, offset, aes_block_size, key,
SecureBlob(0), kPaddingNone, kEcb,
&obscured_chunk)) {
LOG(ERROR) << "AES encryption failed.";
return false;
}
ciphertext->resize(plaintext.size());
char *data = reinterpret_cast<char*>(ciphertext->data());
memcpy(data, plaintext.data(), plaintext.size());
memcpy(data + offset, obscured_chunk.data(), obscured_chunk.size());
return true;
}
bool CryptoLib::UnobscureRSAMessage(const SecureBlob& ciphertext,
const SecureBlob& key,
SecureBlob* plaintext) {
unsigned int aes_block_size = GetAesBlockSize();
if (ciphertext.size() < aes_block_size * 2) {
LOG(ERROR) << "Ciphertext is is too small.";
return false;
}
unsigned int offset = ciphertext.size() - aes_block_size;
SecureBlob unobscured_chunk;
if (!AesDecryptSpecifyBlockMode(ciphertext, offset, aes_block_size, key,
SecureBlob(0), kPaddingNone, kEcb,
&unobscured_chunk)) {
LOG(ERROR) << "AES decryption failed.";
return false;
}
plaintext->resize(ciphertext.size());
char *data = reinterpret_cast<char*>(plaintext->data());
memcpy(data, ciphertext.data(), ciphertext.size());
memcpy(data + offset, unobscured_chunk.data(),
unobscured_chunk.size());
return true;
}
// Encrypts |plaintext| with RSAES-OAEP (OpenSSL defaults: SHA-1/MGF1, empty
// label) under the public part of |key|, writing RSA_size(key) bytes into
// |ciphertext|. Empty plaintexts are rejected. Returns false on failure.
bool CryptoLib::RsaOaepEncrypt(const brillo::SecureBlob& plaintext,
                               RSA* key,
                               brillo::Blob* ciphertext) {
  if (plaintext.empty())
    return false;
  ciphertext->resize(RSA_size(key));
  const int encryption_result =
      RSA_public_encrypt(plaintext.size(), plaintext.data(), ciphertext->data(),
                         key, RSA_PKCS1_OAEP_PADDING);
  if (encryption_result == -1) {
    LOG(ERROR) << "Failed to perform RSAES-OAEP MGF1 encryption";
    return false;
  }
  // On success RSA_public_encrypt returns the number of bytes written, which
  // for OAEP padding is the modulus size. The cast is safe (the -1 case was
  // handled above) and fixes a signed/unsigned comparison.
  if (static_cast<size_t>(encryption_result) != ciphertext->size()) {
    NOTREACHED()
        << "RSAES-OAEP MGF1 encryption returned unexpected amount of data";
    return false;
  }
  return true;
}
// Decrypts RSAES-OAEP |ciphertext| with |key| in two explicit steps: a raw
// RSA private-key operation followed by a manual OAEP padding check using the
// caller-supplied |oaep_label|. Returns false on RSA failure or if the OAEP
// encoding does not verify; |plaintext| is only modified on success.
bool CryptoLib::RsaOaepDecrypt(const brillo::SecureBlob& ciphertext,
                               const brillo::SecureBlob& oaep_label,
                               RSA* key,
                               brillo::SecureBlob* plaintext) {
  const int key_size = RSA_size(key);
  SecureBlob raw_decrypted_data(key_size);
  // Step 1: raw RSA decryption; the result still carries the OAEP encoding.
  const int decryption_result =
      RSA_private_decrypt(ciphertext.size(), ciphertext.data(),
                          raw_decrypted_data.data(), key, RSA_NO_PADDING);
  if (decryption_result == -1) {
    LOG(ERROR) << "RSA raw decryption failed: "
               << ERR_error_string(ERR_get_error(), nullptr);
    return false;
  }
  if (decryption_result != key_size) {
    LOG(ERROR) << "RSA raw decryption returned too few data";
    return false;
  }
  // Step 2: strip and verify the OAEP encoding against |oaep_label|,
  // recovering the message and its actual length.
  SecureBlob local_plaintext(key_size);
  const int padding_check_result = RSA_padding_check_PKCS1_OAEP(
      local_plaintext.data(), local_plaintext.size(), raw_decrypted_data.data(),
      raw_decrypted_data.size(), key_size, oaep_label.data(),
      oaep_label.size());
  if (padding_check_result == -1) {
    LOG(ERROR)
        << "Failed to perform RSA OAEP decoding of the raw decrypted data";
    return false;
  }
  // The padding check returns the recovered message length.
  local_plaintext.resize(padding_check_result);
  *plaintext = std::move(local_plaintext);
  return true;
}
std::string CryptoLib::BlobToHex(const brillo::Blob& blob) {
  // Two hex characters per input byte; the string is pre-sized so the helper
  // fills it exactly with no trailing NUL needed.
  std::string hex(blob.size() * 2, '\x00');
  BlobToHexToBuffer(blob, &hex[0], hex.size());
  return hex;
}
std::string CryptoLib::SecureBlobToHex(const brillo::SecureBlob& blob) {
  // Two hex characters per input byte; the string is pre-sized so the helper
  // fills it exactly with no trailing NUL needed. Note the returned
  // std::string is ordinary (non-wiping) storage.
  std::string hex(blob.size() * 2, '\x00');
  SecureBlobToHexToBuffer(blob, &hex[0], hex.size());
  return hex;
}
// Hex-encodes |blob| into |buffer|, truncating at |buffer_length| characters;
// a trailing NUL is written only if space remains.
void CryptoLib::BlobToHexToBuffer(const brillo::Blob& blob,
                                  void* buffer,
                                  size_t buffer_length) {
  BlobToHexToBufferHelper(blob, buffer, buffer_length);
}
// Hex-encodes |blob| into |buffer|, truncating at |buffer_length| characters;
// a trailing NUL is written only if space remains. The caller owns |buffer|
// and is responsible for wiping it if the contents are sensitive.
void CryptoLib::SecureBlobToHexToBuffer(const brillo::SecureBlob& blob,
                                        void* buffer,
                                        size_t buffer_length) {
  BlobToHexToBufferHelper(blob, buffer, buffer_length);
}
std::string CryptoLib::ComputeEncryptedDataHMAC(
const EncryptedData& encrypted_data, const SecureBlob& hmac_key) {
SecureBlob blob1(encrypted_data.iv().begin(), encrypted_data.iv().end());
SecureBlob blob2(encrypted_data.encrypted_data().begin(),
encrypted_data.encrypted_data().end());
SecureBlob result = SecureBlob::Combine(blob1, blob2);
SecureBlob hmac = HmacSha512(hmac_key, result);
return hmac.to_string();
}
// Performs RSA-OAEP encryption compatible with the TPM 1.2 specification:
// OAEP padding with the four-byte "TCPA" label, applied manually, followed by
// a raw RSA public-key operation. |output| receives RSA_size(key) bytes.
bool CryptoLib::TpmCompatibleOAEPEncrypt(RSA* key,
                                         const brillo::SecureBlob& input,
                                         brillo::SecureBlob* output) {
  CHECK(output);
  // The custom OAEP parameter as specified in TPM Main Part 1, Section 31.1.1.
  const unsigned char oaep_param[4] = {'T', 'C', 'P', 'A'};
  // Pad |input| into a modulus-sized buffer first, since OpenSSL's built-in
  // OAEP mode cannot take the custom TPM label.
  brillo::SecureBlob padded_input(RSA_size(key));
  unsigned char* padded_buffer = padded_input.data();
  const unsigned char* input_buffer = input.data();
  int result = RSA_padding_add_PKCS1_OAEP(padded_buffer, padded_input.size(),
                                          input_buffer, input.size(),
                                          oaep_param, arraysize(oaep_param));
  if (!result) {
    LOG(ERROR) << "Failed to add OAEP padding.";
    return false;
  }
  output->resize(padded_input.size());
  unsigned char* output_buffer = output->data();
  // Raw (RSA_NO_PADDING) operation, because padding was applied above.
  result = RSA_public_encrypt(padded_input.size(), padded_buffer,
                              output_buffer, key, RSA_NO_PADDING);
  if (result == -1) {
    LOG(ERROR) << "Failed to encrypt OAEP padded input.";
    return false;
  }
  return true;
}
// Checks an RSA key modulus for the ROCA fingerprint (i.e. whether the RSA
// modulus has a discrete logarithm modulus small primes). See research paper
// for details: https://crocs.fi.muni.cz/public/papers/rsa_ccs17
bool CryptoLib::TestRocaVulnerable(const BIGNUM* rsa_modulus) {
const BN_ULONG kPrimes[] = {