use super::msm;
use crate::groth16::aggregate::commit::*;
use crate::groth16::multiscalar::{precompute_fixed_window, MultiscalarPrecompOwned, WINDOW_SIZE};
use byteorder::{BigEndian, ReadBytesExt, WriteBytesExt};
use digest::Digest;
use ff::{Field, PrimeField, PrimeFieldBits};
use group::{
    prime::{PrimeCurve, PrimeCurveAffine},
    Curve, Group, GroupEncoding,
};
#[cfg(not(target_arch = "wasm32"))]
use memmap2::Mmap;
use pairing::Engine;
use rayon::prelude::*;
use sha2::Sha256;
use std::convert::TryFrom;
use std::io::{self, Error, ErrorKind, Read, Write};
#[cfg(not(target_arch = "wasm32"))]
use std::mem::size_of;
use std::ops::MulAssign;

/// Maximum size of the generic SRS constructed from the Filecoin and Zcash
/// powers of tau.
///
/// https://github.com/nikkolasg/taupipp/blob/baca1426266bf39416c45303e35c966d69f4f8b4/src/bin/assemble.rs#L12
pub const MAX_SRS_SIZE: usize = (2 << 19) + 1; // 2^20 + 1

/// Contains the maximum number of raw elements of the SRS needed to aggregate and verify
/// Groth16 proofs. One can derive a specialized prover and verifier key for a _specific_ size
/// of aggregation by calling `srs.specialize(n)`. The specialized prover key also contains
/// precomputed tables that drastically increase the prover's performance.
/// This GenericSRS is usually formed from the transcripts of two distinct powers of tau
/// ceremonies, in other words from two distinct Groth16 CRS.
/// See [taupipp](https://github.com/nikkolasg/taupipp) for a way to generate such a GenericSRS.
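///
/// A minimal usage sketch, using `setup_fake_srs` from this module (insecure,
/// tests only); a real deployment would instead load a trusted SRS via
/// `GenericSRS::read`:
///
/// ```ignore
/// let mut rng = rand_chacha::ChaChaRng::seed_from_u64(0);
/// // Generates 2 * 128 powers, enough to specialize for 128 proofs.
/// let generic = setup_fake_srs::<Bls12, _>(&mut rng, 128);
/// let (prover_srs, verifier_srs) = generic.specialize(128);
/// assert!(prover_srs.has_correct_len(128));
/// ```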
#[allow(clippy::upper_case_acronyms)]
#[derive(Clone, Debug)]
pub struct GenericSRS<E: Engine> {
    /// $\{g^{a^i}\}_{i=0}^{N}$ where N is the smallest size of the two Groth16 CRS.
    pub g_alpha_powers: Vec<E::G1Affine>,
    /// $\{h^{a^i}\}_{i=0}^{N}$ where N is the smallest size of the two Groth16 CRS.
    pub h_alpha_powers: Vec<E::G2Affine>,
    /// $\{g^{b^i}\}_{i=0}^{N}$ where N is the smallest size of the two Groth16 CRS.
    pub g_beta_powers: Vec<E::G1Affine>,
    /// $\{h^{b^i}\}_{i=0}^{N}$ where N is the smallest size of the two Groth16 CRS.
    pub h_beta_powers: Vec<E::G2Affine>,
}

/// ProverSRS is the SRS specialized for the prover for a specific number of proofs to
/// aggregate. It also contains the commitment keys for this specific size.
/// Note that the size must be a power of two for the moment; if it is not, padding must be applied.
#[allow(clippy::upper_case_acronyms)]
#[derive(Clone, Debug)]
pub struct ProverSRS<E: Engine> {
    /// number of proofs to aggregate
    pub n: usize,
    /// $\{g^{a^i}\}_{i=0}^{2n-1}$ where n is the number of proofs to be aggregated.
    /// We take all powers instead of only the ones from n -> 2n-1 (from which the w
    /// commitment key is formed) since the prover will create a shifted
    /// polynomial of degree 2n-1 when doing the KZG opening proof.
    pub g_alpha_powers_table: MultiscalarPrecompOwned<E::G1Affine>,
    /// $\{h^{a^i}\}_{i=0}^{n-1}$ - here we don't need to go up to 2n-1 since the v
    /// commitment key only goes up to exponent n-1.
    pub h_alpha_powers_table: MultiscalarPrecompOwned<E::G2Affine>,
    /// $\{g^{b^i}\}_{i=0}^{2n-1}$
    pub g_beta_powers_table: MultiscalarPrecompOwned<E::G1Affine>,
    /// $\{h^{b^i}\}_{i=0}^{n-1}$
    pub h_beta_powers_table: MultiscalarPrecompOwned<E::G2Affine>,
    /// commitment key used in MIPP and TIPP
    pub vkey: VKey<E>,
    /// commitment key used in TIPP
    pub wkey: WKey<E>,
}

/// ProverSRSInputAggregation extends [`ProverSRS`] with an extra precomputed table over the
/// last `n` g-powers of alpha of the generic SRS, needed for optimized input aggregation.
/// As for [`ProverSRS`], the size must be a power of two for the moment.
#[allow(clippy::upper_case_acronyms)]
#[derive(Clone, Debug)]
pub struct ProverSRSInputAggregation<E: Engine> {
    pub prover_srs: ProverSRS<E>,
    /// Needed for optimized input aggregation
    pub g_alpha_powers_end_table: MultiscalarPrecompOwned<E::G1Affine>,
}

impl<E: Engine> std::ops::Deref for ProverSRSInputAggregation<E> {
    type Target = ProverSRS<E>;

    fn deref(&self) -> &Self::Target {
        &self.prover_srs
    }
}

/// Contains the elements necessary to verify an aggregated Groth16 proof. The struct is of
/// fixed size regardless of the number of proofs aggregated, but its values are determined
/// by the number of proofs the SRS was specialized for.
#[allow(clippy::upper_case_acronyms)]
#[derive(Clone, Debug)]
pub struct VerifierSRS<E: Engine> {
    pub n: usize,
    pub g: E::G1,
    pub h: E::G2,
    pub g_alpha: E::G1,
    pub g_beta: E::G1,
    pub h_alpha: E::G2,
    pub h_beta: E::G2,
    pub h_alpha_d: E::G2,
}

impl<E: Engine> PartialEq for GenericSRS<E> {
    fn eq(&self, other: &Self) -> bool {
        self.g_alpha_powers == other.g_alpha_powers
            && self.g_beta_powers == other.g_beta_powers
            && self.h_alpha_powers == other.h_alpha_powers
            && self.h_beta_powers == other.h_beta_powers
    }
}

impl<E: Engine> PartialEq for VerifierSRS<E> {
    fn eq(&self, other: &Self) -> bool {
        self.g == other.g
            && self.h == other.h
            && self.g_alpha == other.g_alpha
            && self.g_beta == other.g_beta
            && self.h_alpha == other.h_alpha
            && self.h_beta == other.h_beta
            && self.h_alpha_d == other.h_alpha_d
    }
}

impl<E: Engine> ProverSRS<E> {
    /// Returns true if the commitment keys have the exact required length.
    /// For the IPP scheme to work, the commitment keys must contain exactly as
    /// many elements as the number of proofs to aggregate.
    pub fn has_correct_len(&self, n: usize) -> bool {
        self.vkey.has_correct_len(n) && self.wkey.has_correct_len(n)
    }
}

impl<E: Engine> ProverSRSInputAggregation<E> {
    /// Returns true if the commitment keys have the exact required length.
    /// For the IPP scheme to work, the commitment keys must contain exactly as
    /// many elements as the number of proofs to aggregate.
    pub fn has_correct_len(&self, n: usize) -> bool {
        self.vkey.has_correct_len(n) && self.wkey.has_correct_len(n)
    }
}

impl<E> GenericSRS<E>
where
    E: Engine,
    <E::G1Affine as GroupEncoding>::Repr: Sync,
    <E::G2Affine as GroupEncoding>::Repr: Sync,
{
    /// Returns the prover and verifier SRS for a specific number of
    /// proofs to aggregate. The number of proofs MUST BE a power of two; it
    /// panics otherwise. The number of proofs must also be at most half the
    /// size of the generic SRS, otherwise it panics.
    pub fn specialize(&self, num_proofs: usize) -> (ProverSRS<E>, VerifierSRS<E>) {
        let pk = self.specialize_prover(num_proofs);
        let vk = self.specialize_vk(num_proofs);
        (pk, vk)
    }

    /// Same as [`specialize`](Self::specialize), but returns a
    /// [`ProverSRSInputAggregation`] carrying the additional precomputed table
    /// needed for optimized input aggregation. The same power-of-two and size
    /// constraints apply, and it panics when they are violated.
    pub fn specialize_input_aggregation(
        &self,
        num_proofs: usize,
    ) -> (ProverSRSInputAggregation<E>, VerifierSRS<E>) {
        let prover_srs = self.specialize_prover(num_proofs);

        let g_alpha_powers_end_table = precompute_fixed_window(
            &self.g_alpha_powers[self.g_alpha_powers.len() - num_proofs..],
            WINDOW_SIZE,
        );

        let pk = ProverSRSInputAggregation::<E> {
            prover_srs,
            g_alpha_powers_end_table,
        };
        let vk = self.specialize_vk(num_proofs);
        (pk, vk)
    }

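    /// Builds the prover side of the specialized SRS: window-precomputed tables
    /// over the first `2 * num_proofs` g-powers and the first `num_proofs`
    /// h-powers, together with the v and w commitment keys. Panics if
    /// `num_proofs` is not a power of two or if the generic SRS holds fewer
    /// than `2 * num_proofs` powers.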
    pub fn specialize_prover(&self, num_proofs: usize) -> ProverSRS<E> {
        assert!(num_proofs.is_power_of_two());
        let tn = 2 * num_proofs; // size of the CRS we need
        assert!(self.g_alpha_powers.len() >= tn);
        assert!(self.h_alpha_powers.len() >= tn);
        assert!(self.g_beta_powers.len() >= tn);
        assert!(self.h_beta_powers.len() >= tn);
        let n = num_proofs;

        // when doing the KZG opening we need _all_ coefficients from 0
        // to 2n-1 because the polynomial is of degree 2n-1.
        let g_low = 0;
        let g_up = tn;
        let h_low = 0;
        let h_up = h_low + n;
        let g_alpha_powers_table =
            precompute_fixed_window(&self.g_alpha_powers[g_low..g_up], WINDOW_SIZE);
        let g_beta_powers_table =
            precompute_fixed_window(&self.g_beta_powers[g_low..g_up], WINDOW_SIZE);
        let h_alpha_powers_table =
            precompute_fixed_window(&self.h_alpha_powers[h_low..h_up], WINDOW_SIZE);
        let h_beta_powers_table =
            precompute_fixed_window(&self.h_beta_powers[h_low..h_up], WINDOW_SIZE);
        let v1 = self.h_alpha_powers[h_low..h_up].to_vec();
        let v2 = self.h_beta_powers[h_low..h_up].to_vec();
        let vkey = VKey::<E> { a: v1, b: v2 };
        assert!(vkey.has_correct_len(n));
        // For the w commitment key, however, we only need the "right" half of
        // the bases, i.e. the powers n..2n-1.
        let w1 = self.g_alpha_powers[n..g_up].to_vec();
        let w2 = self.g_beta_powers[n..g_up].to_vec();
        let wkey = WKey::<E> { a: w1, b: w2 };
        assert!(wkey.has_correct_len(n));
        ProverSRS::<E> {
            g_alpha_powers_table,
            g_beta_powers_table,
            h_alpha_powers_table,
            h_beta_powers_table,
            vkey,
            wkey,
            n,
        }
    }

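    /// Builds the fixed-size verifier SRS: the generators of both groups, the
    /// first alpha and beta powers in both groups, and `h_alpha_d`, the power
    /// of alpha in G2 at index `N - num_proofs` (where N is the size of the
    /// generic SRS).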
    pub fn specialize_vk(&self, num_proofs: usize) -> VerifierSRS<E> {
        assert!(num_proofs.is_power_of_two());
        let n = num_proofs;
        VerifierSRS::<E> {
            n,
            g: self.g_alpha_powers[0].to_curve(),
            h: self.h_alpha_powers[0].to_curve(),
            g_alpha: self.g_alpha_powers[1].to_curve(),
            g_beta: self.g_beta_powers[1].to_curve(),
            h_alpha: self.h_alpha_powers[1].to_curve(),
            h_beta: self.h_beta_powers[1].to_curve(),
            h_alpha_d: self.h_alpha_powers[self.h_alpha_powers.len() - num_proofs].to_curve(),
        }
    }

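    /// Serializes the four power vectors, each written as a big-endian u32
    /// length prefix followed by the canonical encoding of every point.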
    pub fn write<W: Write>(&self, writer: &mut W) -> io::Result<()> {
        write_vec(writer, &self.g_alpha_powers)?;
        write_vec(writer, &self.g_beta_powers)?;
        write_vec(writer, &self.h_alpha_powers)?;
        write_vec(writer, &self.h_beta_powers)?;
        Ok(())
    }

    /// Returns the hash over all powers of this generic srs.
    pub fn hash(&self) -> Vec<u8> {
        let mut v = Vec::new();
        self.write(&mut v).expect("failed to compute hash");
        Sha256::digest(&v).to_vec()
    }

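    /// Deserializes a `GenericSRS` previously written with [`write`](Self::write).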
    pub fn read<R: Read>(reader: &mut R) -> io::Result<Self> {
        let g_alpha_powers = read_vec(reader)?;
        let g_beta_powers = read_vec(reader)?;
        let h_alpha_powers = read_vec(reader)?;
        let h_beta_powers = read_vec(reader)?;
        Ok(Self {
            g_alpha_powers,
            g_beta_powers,
            h_alpha_powers,
            h_beta_powers,
        })
    }

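    /// Memory-mapped variant of [`read`](Self::read) that deserializes at most
    /// `max_len` points of each vector, reading them in parallel.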
    #[cfg(not(target_arch = "wasm32"))]
    pub fn read_mmap(reader: &Mmap, max_len: usize) -> io::Result<Self> {
        fn read_length(mmap: &Mmap, offset: &mut usize) -> io::Result<usize> {
            let u32_len = size_of::<u32>();
            let mut raw_len = &mmap[*offset..*offset + u32_len];
            *offset += u32_len;

            Ok(raw_len.read_u32::<BigEndian>()? as usize)
        }

        // The 'max_len' argument allows us to read up to that maximum
        // (e.g. 2 << 14), rather than the entire vec_len (i.e. 2 << 19).
        fn mmap_read_vec<G: PrimeCurveAffine>(
            mmap: &Mmap,
            offset: &mut usize,
            max_len: usize,
        ) -> io::Result<Vec<G>> {
            let point_len = size_of::<G::Repr>();
            let vec_len = read_length(mmap, offset)?;
            if vec_len > MAX_SRS_SIZE {
                return Err(io::Error::new(
                    io::ErrorKind::InvalidData,
                    format!("invalid SRS vector length {}", vec_len),
                ));
            }

            let max_len = max_len.min(vec_len);

            let vec: Vec<G> = (0..max_len)
                .into_par_iter()
                .map(|i| {
                    let data_start = *offset + (i * point_len);
                    let data_end = data_start + point_len;
                    let ptr = &mmap[data_start..data_end];

                    // Safety: the slice is exactly `size_of::<G::Repr>()` bytes
                    // of the already-mapped buffer, and `G::Repr` is a plain
                    // byte representation, so reinterpreting it is a valid read.
                    let repr = unsafe { &*(ptr as *const [u8] as *const G::Repr) };
                    let opt: Option<G> = G::from_bytes(repr).into();
                    opt.ok_or_else(|| io::Error::new(io::ErrorKind::InvalidData, "not on curve"))
                })
                .collect::<io::Result<Vec<_>>>()?;
            *offset += vec_len * point_len;
            Ok(vec)
        }

        let mut offset: usize = 0;
        let g_alpha_powers = mmap_read_vec::<E::G1Affine>(reader, &mut offset, max_len)?;
        let g_beta_powers = mmap_read_vec::<E::G1Affine>(reader, &mut offset, max_len)?;
        let h_alpha_powers = mmap_read_vec::<E::G2Affine>(reader, &mut offset, max_len)?;
        let h_beta_powers = mmap_read_vec::<E::G2Affine>(reader, &mut offset, max_len)?;
        Ok(Self {
            g_alpha_powers,
            g_beta_powers,
            h_alpha_powers,
            h_beta_powers,
        })
    }
}

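/// Generates a `GenericSRS` of `2 * size` powers from freshly sampled random
/// alpha and beta. Since the trapdoor values are known at generation time,
/// this SRS is insecure and intended for tests and benchmarks only.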
pub fn setup_fake_srs<E, R>(rng: &mut R, size: usize) -> GenericSRS<E>
where
    E: Engine,
    E::Fr: PrimeFieldBits,
    R: rand_core::RngCore,
{
    let alpha = E::Fr::random(&mut *rng);
    let beta = E::Fr::random(&mut *rng);
    let g = E::G1::generator();
    let h = E::G2::generator();

    let alpha = &alpha;
    let h = &h;
    let g = &g;
    let beta = &beta;
    par! {
        let g_alpha_powers = structured_generators_scalar_power(2 * size, g, alpha),
        let g_beta_powers = structured_generators_scalar_power(2 * size, g, beta),
        let h_alpha_powers = structured_generators_scalar_power(2 * size, h, alpha),
        let h_beta_powers = structured_generators_scalar_power(2 * size, h, beta)
    };

    debug_assert!(h_alpha_powers[0] == E::G2::generator().to_affine());
    debug_assert!(h_beta_powers[0] == E::G2::generator().to_affine());
    debug_assert!(g_alpha_powers[0] == E::G1::generator().to_affine());
    debug_assert!(g_beta_powers[0] == E::G1::generator().to_affine());

    GenericSRS {
        g_alpha_powers,
        g_beta_powers,
        h_alpha_powers,
        h_beta_powers,
    }
}

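/// Computes $(g, g^s, g^{s^2}, \dots, g^{s^{num-1}})$ in affine coordinates
/// using a windowed fixed-base multi-scalar multiplication.
///
/// A naive (and much slower) equivalent, shown here only as a sketch to
/// document the output:
///
/// ```ignore
/// let mut cur = *g;
/// let naive: Vec<G::AffineRepr> = (0..num)
///     .map(|_| {
///         let out = cur.to_affine();
///         cur *= *s;
///         out
///     })
///     .collect();
/// ```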
pub(crate) fn structured_generators_scalar_power<G>(
    num: usize,
    g: &G,
    s: &G::Scalar,
) -> Vec<G::AffineRepr>
where
    G: PrimeCurve,
    G::Scalar: PrimeFieldBits,
    G::AffineRepr: Send,
{
    assert!(num > 0);
    let mut powers_of_scalar = Vec::with_capacity(num);
    let mut pow_s = G::Scalar::ONE;
    for _ in 0..num {
        powers_of_scalar.push(pow_s);
        pow_s.mul_assign(s);
    }

    let window_size = msm::fixed_base::get_mul_window_size(num);
    let scalar_bits = G::Scalar::NUM_BITS as usize;
    let g_table = msm::fixed_base::get_window_table(scalar_bits, window_size, *g);
    let powers_of_g = msm::fixed_base::multi_scalar_mul::<G>(
        scalar_bits,
        window_size,
        &g_table,
        &powers_of_scalar[..],
    );
    powers_of_g.into_iter().map(|v| v.to_affine()).collect()
}

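/// Writes `v` as a big-endian u32 length prefix followed by each point's
/// canonical `GroupEncoding` bytes.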
fn write_vec<G: PrimeCurveAffine, W: Write>(w: &mut W, v: &[G]) -> io::Result<()> {
    w.write_u32::<BigEndian>(u32::try_from(v.len()).map_err(|_| {
        Error::new(
            ErrorKind::InvalidInput,
            format!("invalid vector length > u32: {}", v.len()),
        )
    })?)?;
    for p in v {
        write_point(w, p)?;
    }
    Ok(())
}

fn write_point<G: PrimeCurveAffine, W: Write>(w: &mut W, p: &G) -> io::Result<()> {
    w.write_all(p.to_bytes().as_ref())?;
    Ok(())
}

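/// Reads a point vector in the format produced by [`write_vec`], rejecting
/// length prefixes above [`MAX_SRS_SIZE`] and points that fail to deserialize.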
fn read_vec<G, R>(r: &mut R) -> io::Result<Vec<G>>
where
    G: PrimeCurveAffine,
    G::Repr: Sync,
    R: Read,
{
    let vector_len = r.read_u32::<BigEndian>()? as usize;
    if vector_len > MAX_SRS_SIZE {
        return Err(Error::new(
            ErrorKind::InvalidData,
            format!("invalid SRS vector length {}", vector_len),
        ));
    }

    let data: Vec<G::Repr> = (0..vector_len)
        .map(|_| {
            let mut el = G::Repr::default();
            r.read_exact(el.as_mut())?;
            Ok(el)
        })
        .collect::<Result<_, io::Error>>()?;

    data.par_iter()
        .map(|enc| {
            let opt: Option<G> = G::from_bytes(enc).into();
            opt.ok_or_else(|| io::Error::new(io::ErrorKind::InvalidData, "not on curve"))
        })
        .collect::<io::Result<Vec<_>>>()
}

#[cfg(test)]
mod test {
    use super::*;
    use blstrs::Bls12;
    use rand_core::SeedableRng;
    use std::io::Cursor;

    #[test]
    fn test_srs_invalid_length() {
        let mut rng = rand_chacha::ChaChaRng::seed_from_u64(0u64);
        let size = 8;
        let srs = setup_fake_srs::<Bls12, _>(&mut rng, size);
        let vec_len = srs.g_alpha_powers.len();
        let mut buffer = Vec::new();
        srs.write(&mut buffer).expect("writing to buffer failed");
        // first check that a normal round-trip read works
        GenericSRS::<Bls12>::read(&mut Cursor::new(&buffer)).expect("can't read the srs");

        // read back the length prefix of the first vector
        let read_size = Cursor::new(&buffer).read_u32::<BigEndian>().unwrap() as usize;
        assert_eq!(vec_len, read_size);

        // replace the original length prefix (u32 = 4 bytes) with an invalid
        // one, then append the rest of the buffer
        let mut new_buffer = Vec::new();
        let invalid_size = MAX_SRS_SIZE + 1;
        new_buffer
            .write_u32::<BigEndian>(invalid_size as u32)
            .expect("failed to write invalid size");
        buffer.drain(0..4);
        new_buffer.append(&mut buffer);
        GenericSRS::<Bls12>::read(&mut Cursor::new(&new_buffer))
            .expect_err("this should have failed");
    }
}