@@ -648,7 +648,7 @@ static size_t secp256k1_strauss_scratch_size(size_t n_points) {
     return n_points*point_size;
 }
 
-static int secp256k1_ecmult_strauss_batch(const secp256k1_ecmult_context *ctx, secp256k1_scratch *scratch, secp256k1_gej *r, const secp256k1_scalar *inp_g_sc, secp256k1_ecmult_multi_callback cb, void *cbdata, size_t n_points, size_t cb_offset) {
+static int secp256k1_ecmult_strauss_batch(const secp256k1_callback* error_callback, const secp256k1_ecmult_context *ctx, secp256k1_scratch *scratch, secp256k1_gej *r, const secp256k1_scalar *inp_g_sc, secp256k1_ecmult_multi_callback cb, void *cbdata, size_t n_points, size_t cb_offset) {
     secp256k1_gej* points;
     secp256k1_scalar* scalars;
     struct secp256k1_strauss_state state;
@@ -659,41 +659,41 @@ static int secp256k1_ecmult_strauss_batch(const secp256k1_ecmult_context *ctx, s
         return 1;
     }
 
-    if (!secp256k1_scratch_allocate_frame(scratch, secp256k1_strauss_scratch_size(n_points), STRAUSS_SCRATCH_OBJECTS)) {
+    if (!secp256k1_scratch_allocate_frame(error_callback, scratch, secp256k1_strauss_scratch_size(n_points), STRAUSS_SCRATCH_OBJECTS)) {
         return 0;
     }
-    points = (secp256k1_gej*)secp256k1_scratch_alloc(scratch, n_points * sizeof(secp256k1_gej));
-    scalars = (secp256k1_scalar*)secp256k1_scratch_alloc(scratch, n_points * sizeof(secp256k1_scalar));
-    state.prej = (secp256k1_gej*)secp256k1_scratch_alloc(scratch, n_points * ECMULT_TABLE_SIZE(WINDOW_A) * sizeof(secp256k1_gej));
-    state.zr = (secp256k1_fe*)secp256k1_scratch_alloc(scratch, n_points * ECMULT_TABLE_SIZE(WINDOW_A) * sizeof(secp256k1_fe));
+    points = (secp256k1_gej*)secp256k1_scratch_alloc(error_callback, scratch, n_points * sizeof(secp256k1_gej));
+    scalars = (secp256k1_scalar*)secp256k1_scratch_alloc(error_callback, scratch, n_points * sizeof(secp256k1_scalar));
+    state.prej = (secp256k1_gej*)secp256k1_scratch_alloc(error_callback, scratch, n_points * ECMULT_TABLE_SIZE(WINDOW_A) * sizeof(secp256k1_gej));
+    state.zr = (secp256k1_fe*)secp256k1_scratch_alloc(error_callback, scratch, n_points * ECMULT_TABLE_SIZE(WINDOW_A) * sizeof(secp256k1_fe));
 #ifdef USE_ENDOMORPHISM
-    state.pre_a = (secp256k1_ge*)secp256k1_scratch_alloc(scratch, n_points * 2 * ECMULT_TABLE_SIZE(WINDOW_A) * sizeof(secp256k1_ge));
+    state.pre_a = (secp256k1_ge*)secp256k1_scratch_alloc(error_callback, scratch, n_points * 2 * ECMULT_TABLE_SIZE(WINDOW_A) * sizeof(secp256k1_ge));
     state.pre_a_lam = state.pre_a + n_points * ECMULT_TABLE_SIZE(WINDOW_A);
 #else
-    state.pre_a = (secp256k1_ge*)secp256k1_scratch_alloc(scratch, n_points * ECMULT_TABLE_SIZE(WINDOW_A) * sizeof(secp256k1_ge));
+    state.pre_a = (secp256k1_ge*)secp256k1_scratch_alloc(error_callback, scratch, n_points * ECMULT_TABLE_SIZE(WINDOW_A) * sizeof(secp256k1_ge));
 #endif
-    state.ps = (struct secp256k1_strauss_point_state*)secp256k1_scratch_alloc(scratch, n_points * sizeof(struct secp256k1_strauss_point_state));
+    state.ps = (struct secp256k1_strauss_point_state*)secp256k1_scratch_alloc(error_callback, scratch, n_points * sizeof(struct secp256k1_strauss_point_state));
 
     for (i = 0; i < n_points; i++) {
         secp256k1_ge point;
         if (!cb(&scalars[i], &point, i+cb_offset, cbdata)) {
-            secp256k1_scratch_deallocate_frame(scratch);
+            secp256k1_scratch_deallocate_frame(error_callback, scratch);
             return 0;
         }
         secp256k1_gej_set_ge(&points[i], &point);
     }
     secp256k1_ecmult_strauss_wnaf(ctx, &state, r, n_points, points, scalars, inp_g_sc);
-    secp256k1_scratch_deallocate_frame(scratch);
+    secp256k1_scratch_deallocate_frame(error_callback, scratch);
     return 1;
 }
 
 /* Wrapper for secp256k1_ecmult_multi_func interface */
-static int secp256k1_ecmult_strauss_batch_single(const secp256k1_ecmult_context *actx, secp256k1_scratch *scratch, secp256k1_gej *r, const secp256k1_scalar *inp_g_sc, secp256k1_ecmult_multi_callback cb, void *cbdata, size_t n) {
-    return secp256k1_ecmult_strauss_batch(actx, scratch, r, inp_g_sc, cb, cbdata, n, 0);
+static int secp256k1_ecmult_strauss_batch_single(const secp256k1_callback* error_callback, const secp256k1_ecmult_context *actx, secp256k1_scratch *scratch, secp256k1_gej *r, const secp256k1_scalar *inp_g_sc, secp256k1_ecmult_multi_callback cb, void *cbdata, size_t n) {
+    return secp256k1_ecmult_strauss_batch(error_callback, actx, scratch, r, inp_g_sc, cb, cbdata, n, 0);
 }
 
-static size_t secp256k1_strauss_max_points(secp256k1_scratch *scratch) {
-    return secp256k1_scratch_max_allocation(scratch, STRAUSS_SCRATCH_OBJECTS) / secp256k1_strauss_scratch_size(1);
+static size_t secp256k1_strauss_max_points(const secp256k1_callback* error_callback, secp256k1_scratch *scratch) {
+    return secp256k1_scratch_max_allocation(error_callback, scratch, STRAUSS_SCRATCH_OBJECTS) / secp256k1_strauss_scratch_size(1);
 }
 
 /** Convert a number to WNAF notation.
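Note (illustrative, not part of the diff above): the `secp256k1_callback` now threaded through the Strauss helpers is, as far as the surrounding codebase suggests, the library's small error-reporting hook from util.h, a function pointer plus opaque data. A minimal caller-side sketch under that assumption; the handler name, message format, and abort policy are purely hypothetical:

#include <stdio.h>
#include <stdlib.h>
/* assumes the secp256k1_callback type from src/util.h */

/* Hypothetical handler: print the library's error text and abort. */
static void my_scratch_error_fn(const char *message, void *data) {
    (void)data;  /* no per-callback state needed here */
    fprintf(stderr, "secp256k1 scratch error: %s\n", message);
    abort();
}

/* A callback instance that could be passed as the new error_callback argument. */
static const secp256k1_callback my_error_callback = { my_scratch_error_fn, NULL };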
@@ -985,7 +985,7 @@ static size_t secp256k1_pippenger_scratch_size(size_t n_points, int bucket_windo
     return (sizeof(secp256k1_gej) << bucket_window) + sizeof(struct secp256k1_pippenger_state) + entries * entry_size;
 }
 
-static int secp256k1_ecmult_pippenger_batch(const secp256k1_ecmult_context *ctx, secp256k1_scratch *scratch, secp256k1_gej *r, const secp256k1_scalar *inp_g_sc, secp256k1_ecmult_multi_callback cb, void *cbdata, size_t n_points, size_t cb_offset) {
+static int secp256k1_ecmult_pippenger_batch(const secp256k1_callback* error_callback, const secp256k1_ecmult_context *ctx, secp256k1_scratch *scratch, secp256k1_gej *r, const secp256k1_scalar *inp_g_sc, secp256k1_ecmult_multi_callback cb, void *cbdata, size_t n_points, size_t cb_offset) {
     /* Use 2(n+1) with the endomorphism, n+1 without, when calculating batch
      * sizes. The reason for +1 is that we add the G scalar to the list of
      * other scalars. */
@@ -1010,15 +1010,15 @@ static int secp256k1_ecmult_pippenger_batch(const secp256k1_ecmult_context *ctx,
     }
 
     bucket_window = secp256k1_pippenger_bucket_window(n_points);
-    if (!secp256k1_scratch_allocate_frame(scratch, secp256k1_pippenger_scratch_size(n_points, bucket_window), PIPPENGER_SCRATCH_OBJECTS)) {
+    if (!secp256k1_scratch_allocate_frame(error_callback, scratch, secp256k1_pippenger_scratch_size(n_points, bucket_window), PIPPENGER_SCRATCH_OBJECTS)) {
         return 0;
     }
-    points = (secp256k1_ge *) secp256k1_scratch_alloc(scratch, entries * sizeof(*points));
-    scalars = (secp256k1_scalar *) secp256k1_scratch_alloc(scratch, entries * sizeof(*scalars));
-    state_space = (struct secp256k1_pippenger_state *) secp256k1_scratch_alloc(scratch, sizeof(*state_space));
-    state_space->ps = (struct secp256k1_pippenger_point_state *) secp256k1_scratch_alloc(scratch, entries * sizeof(*state_space->ps));
-    state_space->wnaf_na = (int *) secp256k1_scratch_alloc(scratch, entries*(WNAF_SIZE(bucket_window+1)) * sizeof(int));
-    buckets = (secp256k1_gej *) secp256k1_scratch_alloc(scratch, sizeof(*buckets) << bucket_window);
+    points = (secp256k1_ge *) secp256k1_scratch_alloc(error_callback, scratch, entries * sizeof(*points));
+    scalars = (secp256k1_scalar *) secp256k1_scratch_alloc(error_callback, scratch, entries * sizeof(*scalars));
+    state_space = (struct secp256k1_pippenger_state *) secp256k1_scratch_alloc(error_callback, scratch, sizeof(*state_space));
+    state_space->ps = (struct secp256k1_pippenger_point_state *) secp256k1_scratch_alloc(error_callback, scratch, entries * sizeof(*state_space->ps));
+    state_space->wnaf_na = (int *) secp256k1_scratch_alloc(error_callback, scratch, entries*(WNAF_SIZE(bucket_window+1)) * sizeof(int));
+    buckets = (secp256k1_gej *) secp256k1_scratch_alloc(error_callback, scratch, (1<<bucket_window) * sizeof(*buckets));
 
     if (inp_g_sc != NULL) {
         scalars[0] = *inp_g_sc;
@@ -1032,7 +1032,7 @@ static int secp256k1_ecmult_pippenger_batch(const secp256k1_ecmult_context *ctx,
 
     while (point_idx < n_points) {
         if (!cb(&scalars[idx], &points[idx], point_idx + cb_offset, cbdata)) {
-            secp256k1_scratch_deallocate_frame(scratch);
+            secp256k1_scratch_deallocate_frame(error_callback, scratch);
             return 0;
         }
         idx++;
@@ -1056,22 +1056,22 @@ static int secp256k1_ecmult_pippenger_batch(const secp256k1_ecmult_context *ctx,
     for(i = 0; i < 1<<bucket_window; i++) {
         secp256k1_gej_clear(&buckets[i]);
     }
-    secp256k1_scratch_deallocate_frame(scratch);
+    secp256k1_scratch_deallocate_frame(error_callback, scratch);
     return 1;
 }
 
 /* Wrapper for secp256k1_ecmult_multi_func interface */
-static int secp256k1_ecmult_pippenger_batch_single(const secp256k1_ecmult_context *actx, secp256k1_scratch *scratch, secp256k1_gej *r, const secp256k1_scalar *inp_g_sc, secp256k1_ecmult_multi_callback cb, void *cbdata, size_t n) {
-    return secp256k1_ecmult_pippenger_batch(actx, scratch, r, inp_g_sc, cb, cbdata, n, 0);
+static int secp256k1_ecmult_pippenger_batch_single(const secp256k1_callback* error_callback, const secp256k1_ecmult_context *actx, secp256k1_scratch *scratch, secp256k1_gej *r, const secp256k1_scalar *inp_g_sc, secp256k1_ecmult_multi_callback cb, void *cbdata, size_t n) {
+    return secp256k1_ecmult_pippenger_batch(error_callback, actx, scratch, r, inp_g_sc, cb, cbdata, n, 0);
 }
 
 /**
  * Returns the maximum number of points in addition to G that can be used with
  * a given scratch space. The function ensures that fewer points may also be
  * used.
  */
-static size_t secp256k1_pippenger_max_points(secp256k1_scratch *scratch) {
-    size_t max_alloc = secp256k1_scratch_max_allocation(scratch, PIPPENGER_SCRATCH_OBJECTS);
+static size_t secp256k1_pippenger_max_points(const secp256k1_callback* error_callback, secp256k1_scratch *scratch) {
+    size_t max_alloc = secp256k1_scratch_max_allocation(error_callback, scratch, PIPPENGER_SCRATCH_OBJECTS);
     int bucket_window;
     size_t res = 0;
 
@@ -1153,11 +1153,11 @@ static int secp256k1_ecmult_multi_batch_size_helper(size_t *n_batches, size_t *n
     return 1;
 }
 
-typedef int (*secp256k1_ecmult_multi_func)(const secp256k1_ecmult_context*, secp256k1_scratch*, secp256k1_gej*, const secp256k1_scalar*, secp256k1_ecmult_multi_callback cb, void*, size_t);
-static int secp256k1_ecmult_multi_var(const secp256k1_ecmult_context *ctx, secp256k1_scratch *scratch, secp256k1_gej *r, const secp256k1_scalar *inp_g_sc, secp256k1_ecmult_multi_callback cb, void *cbdata, size_t n) {
+typedef int (*secp256k1_ecmult_multi_func)(const secp256k1_callback* error_callback, const secp256k1_ecmult_context*, secp256k1_scratch*, secp256k1_gej*, const secp256k1_scalar*, secp256k1_ecmult_multi_callback cb, void*, size_t);
+static int secp256k1_ecmult_multi_var(const secp256k1_callback* error_callback, const secp256k1_ecmult_context *ctx, secp256k1_scratch *scratch, secp256k1_gej *r, const secp256k1_scalar *inp_g_sc, secp256k1_ecmult_multi_callback cb, void *cbdata, size_t n) {
     size_t i;
 
-    int (*f)(const secp256k1_ecmult_context*, secp256k1_scratch*, secp256k1_gej*, const secp256k1_scalar*, secp256k1_ecmult_multi_callback cb, void*, size_t, size_t);
+    int (*f)(const secp256k1_callback* error_callback, const secp256k1_ecmult_context*, secp256k1_scratch*, secp256k1_gej*, const secp256k1_scalar*, secp256k1_ecmult_multi_callback cb, void*, size_t, size_t);
     size_t n_batches;
     size_t n_batch_points;
 
@@ -1178,13 +1178,13 @@ static int secp256k1_ecmult_multi_var(const secp256k1_ecmult_context *ctx, secp2
      * a threshold use Pippenger's algorithm. Otherwise use Strauss' algorithm.
      * As a first step check if there's enough space for Pippenger's algo (which requires less space
      * than Strauss' algo) and if not, use the simple algorithm. */
-    if (!secp256k1_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, secp256k1_pippenger_max_points(scratch), n)) {
+    if (!secp256k1_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, secp256k1_pippenger_max_points(error_callback, scratch), n)) {
         return secp256k1_ecmult_multi_simple_var(ctx, r, inp_g_sc, cb, cbdata, n);
     }
     if (n_batch_points >= ECMULT_PIPPENGER_THRESHOLD) {
         f = secp256k1_ecmult_pippenger_batch;
     } else {
-        if (!secp256k1_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, secp256k1_strauss_max_points(scratch), n)) {
+        if (!secp256k1_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, secp256k1_strauss_max_points(error_callback, scratch), n)) {
            return secp256k1_ecmult_multi_simple_var(ctx, r, inp_g_sc, cb, cbdata, n);
        }
        f = secp256k1_ecmult_strauss_batch;
@@ -1193,7 +1193,7 @@ static int secp256k1_ecmult_multi_var(const secp256k1_ecmult_context *ctx, secp2
         size_t nbp = n < n_batch_points ? n : n_batch_points;
         size_t offset = n_batch_points*i;
         secp256k1_gej tmp;
-        if (!f(ctx, scratch, &tmp, i == 0 ? inp_g_sc : NULL, cb, cbdata, nbp, offset)) {
+        if (!f(error_callback, ctx, scratch, &tmp, i == 0 ? inp_g_sc : NULL, cb, cbdata, nbp, offset)) {
            return 0;
        }
        secp256k1_gej_add_var(r, r, &tmp, NULL);
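Note (illustrative, not part of the diff above): the net effect of this change is that every internal ecmult_multi entry point now receives the error callback explicitly. A sketch of how an internal caller of the new `secp256k1_ecmult_multi_var` signature might look; only the signatures are taken from the diff, while the callback body, `my_terms_t` holder, and surrounding variable names are hypothetical:

/* Hypothetical container for the scalar/point terms a caller wants to combine. */
typedef struct {
    const secp256k1_scalar *scalars;
    const secp256k1_ge *points;
} my_terms_t;

/* Callback matching the secp256k1_ecmult_multi_callback shape used above:
 * fill in the idx-th scalar/point pair and return non-zero on success. */
static int my_terms_cb(secp256k1_scalar *sc, secp256k1_ge *pt, size_t idx, void *data) {
    const my_terms_t *terms = (const my_terms_t *)data;
    *sc = terms->scalars[idx];
    *pt = terms->points[idx];
    return 1;
}

/* Inside a function that already has error_cb, ecmult_ctx, scratch, g_scalar and n terms:
 *
 *     secp256k1_gej r;
 *     my_terms_t terms = { scalars, points };
 *     if (!secp256k1_ecmult_multi_var(error_cb, &ecmult_ctx, scratch, &r,
 *                                     &g_scalar, my_terms_cb, &terms, n)) {
 *         return 0;  (failure reported by the callback or the batch implementation)
 *     }
 */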