// labrador/core/garbage_polynomials.rs
use crate::ring::{rq_matrix::RqMatrix, rq_vector::RqVector};

use super::inner_product;
4
5/// Calculate the garbage polynomials g_{ij} = <s_i, s_j>
6/// Exploits symmetry by only calculating for i ≤ j since g_{ij} = g_{ji}
7pub fn compute_g(witness_vector: &[RqVector]) -> RqMatrix {
8 let mut g_i = Vec::new();
9 for i in 0..witness_vector.len() {
10 let mut g_ij = Vec::new();
11 for j in 0..=i {
12 // Only calculate for j ≤ i (upper triangular)
13 g_ij.push(inner_product::compute_linear_combination(
14 witness_vector[i].get_elements(),
15 witness_vector[j].get_elements(),
16 ));
17 }
18 g_i.push(RqVector::new(g_ij));
19 }
20 RqMatrix::new(g_i, true)
21}
22
23/// Calculate the h_{ij} = <φ_i, s_j> + <φ_j, s_i> garbage polynomials
24/// In the paper, h_{ij} is defined with a factor of 1/2 in front
25/// However, since we're using q = 2^32, division by 2 is problematic in Z_q
26/// So we store h'_{ij} = 2*h_{ij} = <φ_i, s_j> + <φ_j, s_i> directly
27/// Exploits symmetry by only calculating for i ≤ j since h_{ij} = h_{ji}
28pub fn compute_h(witness_vector: &[RqVector], phi: &[RqVector]) -> RqMatrix {
29 let r = witness_vector.len();
30 let mut h_i = Vec::with_capacity((r * (r + 1)) / 2);
31
32 for i in 0..r {
33 let mut h_ij = Vec::new();
34 for j in 0..=i {
35 // Only calculate for j ≤ i (upper triangular)
36 let inner_phi_i_s_j = inner_product::compute_linear_combination(
37 phi[i].get_elements(),
38 witness_vector[j].get_elements(),
39 );
40 let inner_phi_j_s_i = inner_product::compute_linear_combination(
41 phi[j].get_elements(),
42 witness_vector[i].get_elements(),
43 );
44 h_ij.push(&inner_phi_i_s_j + &inner_phi_j_s_i);
45 }
46 h_i.push(RqVector::new(h_ij));
47 }
48 RqMatrix::new(h_i, true)
49}
50
51// Todo: Revise and complete the following
52// Implementation of the final level optimization (Section 5.6)
53// /// Uses sequentially derived challenges via Fiat-Shamir to simulate the interactive protocol
54// pub fn optimize_final_level(
55// witnesses: &[PolyVector],
56// phi: &[PolyVector],
57// initial_seed: u64,
58// ) -> (PolyRing, Vec<PolyRing>, Vec<PolyRing>) {
59// let r = witnesses.len();
60
61// // Calculate g_0 = Σ_i <s_i, s_i> (diagonal sum)
62// let g0 = (0..r)
63// .map(|i| witnesses[i].inner_product_poly_vector(&witnesses[i]))
64// .fold(
65// PolyRing::zero(witnesses[0].get_elements()[0].len()),
66// |acc, g| &acc + &g,
67// );
68
69// // Generate sequence of challenges using Fiat-Shamir
70// let mut challenges = Vec::with_capacity(r / 2);
71// let mut hasher = DefaultHasher::new();
72// initial_seed.hash(&mut hasher);
73// let mut current_seed = hasher.finish();
74
75// for _ in 0..r / 2 {
76// let mut rng = rand::rngs::StdRng::seed_from_u64(current_seed);
77// let challenge = PolyRing::random(&mut rng, witnesses[0].get_elements()[0].len());
78// challenges.push(challenge);
79
80// // Update seed for next challenge
81// let mut hasher = DefaultHasher::new();
82// current_seed.hash(&mut hasher);
83// current_seed = hasher.finish();
84// }
85
86// // Calculate selected g terms: g_{2i-1} and g_{2i}
87// let mut g_terms = Vec::new();
88// for i in 1..=r / 2 {
89// let idx1 = 2 * i - 2;
90// let idx2 = 2 * i - 1;
91
92// // Use unique challenge for each i
93// let challenge = &challenges[i - 1];
94
95// // Add g_{2i-1}
96// if idx2 < r {
97// // For g_{2i-1} = <s_{2i-2}, c_i * s_{2i-1}>
98// let s_j_scaled = witnesses[idx2]
99// .iter()
100// .map(|p| p * challenge)
101// .collect::<PolyVector>();
102
103// let g_2i_1 = witnesses[idx1].inner_product_poly_vector(&s_j_scaled);
104// g_terms.push(g_2i_1);
105
106// // Add g_{2i} if we have enough witnesses
107// if 2 * i < r {
108// // For g_{2i} = <s_{2i-1}, c_i * s_{2i}>
109// let s_j_scaled = witnesses[2 * i]
110// .iter()
111// .map(|p| p * challenge)
112// .collect::<PolyVector>();
113
114// let g_2i = witnesses[idx2].inner_product_poly_vector(&s_j_scaled);
115// g_terms.push(g_2i);
116// }
117// }
118// }
119
120// // Calculate selected h terms: h_{2i-1} and h_{2i}
121// let mut h_terms = Vec::new();
122// for i in 1..=r / 2 {
123// let idx1 = 2 * i - 2;
124// let idx2 = 2 * i - 1;
125
126// // Use unique challenge for each i
127// let challenge = &challenges[i - 1];
128
129// // Add h_{2i-1}
130// if idx2 < r {
131// // For h_{2i-1} = <φ_{2i-2}, c_i * s_{2i-1}> + <φ_{2i-1}, c_i * s_{2i-2}>
132// let s_j_scaled = witnesses[idx2]
133// .iter()
134// .map(|p| p * challenge)
135// .collect::<PolyVector>();
136
137// let phi_i_s_j = phi[idx1].inner_product_poly_vector(&s_j_scaled);
138
139// let s_i_scaled = witnesses[idx1]
140// .iter()
141// .map(|p| p * challenge)
142// .collect::<PolyVector>();
143
144// let phi_j_s_i = phi[idx2].inner_product_poly_vector(&s_i_scaled);
145
146// let h_2i_1 = &phi_i_s_j + &phi_j_s_i;
147// h_terms.push(h_2i_1);
148
149// // Add h_{2i} if we have enough witnesses
150// if 2 * i < r {
151// // For h_{2i} = <φ_{2i-1}, c_i * s_{2i}> + <φ_{2i}, c_i * s_{2i-1}>
152// let s_j_scaled = witnesses[2 * i]
153// .iter()
154// .map(|p| p * challenge)
155// .collect::<PolyVector>();
156
157// let phi_i_s_j = phi[idx2].inner_product_poly_vector(&s_j_scaled);
158
159// let s_i_scaled = witnesses[idx2]
160// .iter()
161// .map(|p| p * challenge)
162// .collect::<PolyVector>();
163
164// let phi_j_s_i = phi[2 * i].inner_product_poly_vector(&s_i_scaled);
165
166// let h_2i = &phi_i_s_j + &phi_j_s_i;
167// h_terms.push(h_2i);
168// }
169// }
170// }
171
172// (g0, g_terms, h_terms)
173// }
174
#[cfg(test)]
mod tests {
    use crate::ring::rq::Rq;

    use super::*;
    use rand::rng;

    // Number of ring elements in every generated test vector.
    const RANK: usize = 8;

    /// Generates `count` random witness vectors s_i and `count` random φ_i
    /// vectors, each of length `RANK`.
    fn create_test_witnesses(count: usize) -> (Vec<RqVector>, Vec<RqVector>) {
        let witnesses = (0..count)
            .map(|_| RqVector::random(&mut rng(), RANK))
            .collect();

        let phi = (0..count)
            .map(|_| RqVector::random(&mut rng(), RANK))
            .collect();

        (witnesses, phi)
    }

    #[test]
    fn test_g_matrix_size() {
        let multiplicity = 3;
        let (witnesses, _) = create_test_witnesses(multiplicity);
        let g = compute_g(&witnesses);

        assert_eq!(g.get_row_len(), multiplicity);
        // g stores only the lower-triangular half: row i has i + 1 cells.
        for row in 0..multiplicity {
            assert_eq!(g.get_elements()[row].get_length(), row + 1);
        }
    }

    #[test]
    fn test_g_calculation() {
        let (witnesses, _) = create_test_witnesses(3);

        let g = compute_g(&witnesses);

        // g_{01} and g_{10} must agree because the inner product is symmetric.
        let expected_g_01 = inner_product::compute_linear_combination(
            witnesses[0].get_elements(),
            witnesses[1].get_elements(),
        );
        let expected_g_10 = inner_product::compute_linear_combination(
            witnesses[1].get_elements(),
            witnesses[0].get_elements(),
        );
        assert_eq!(expected_g_01, expected_g_10);

        // Diagonal entry g_{22} = <s_2, s_2>.
        let expected_g_22 = inner_product::compute_linear_combination(
            witnesses[2].get_elements(),
            witnesses[2].get_elements(),
        );

        assert_eq!(*g.get_cell(0, 1), expected_g_01);
        assert_eq!(*g.get_cell(1, 0), expected_g_10);
        assert_eq!(*g.get_cell(2, 2), expected_g_22);
    }

    #[test]
    fn test_h_matrix_size() {
        let multiplicity = 3;
        let (witnesses, phi) = create_test_witnesses(multiplicity);
        let h = compute_h(&witnesses, &phi);

        assert_eq!(h.get_row_len(), multiplicity);
        // h stores only the lower-triangular half: row i has i + 1 cells.
        for row in 0..multiplicity {
            assert_eq!(h.get_elements()[row].get_length(), row + 1);
        }
    }

    #[test]
    fn test_h_calculation() {
        let (witnesses, phi) = create_test_witnesses(3);
        let h = compute_h(&witnesses, &phi);

        // Expected h_{01} = <φ_0, s_1> + <φ_1, s_0>.
        let phi_0_s_1 = inner_product::compute_linear_combination(
            phi[0].get_elements(),
            witnesses[1].get_elements(),
        );
        let phi_1_s_0 = inner_product::compute_linear_combination(
            phi[1].get_elements(),
            witnesses[0].get_elements(),
        );
        let expected_h_01 = &phi_0_s_1 + &phi_1_s_0;

        // Symmetric access must return the same cell for (0,1) and (1,0).
        assert_eq!(h.get_cell(0, 1), h.get_cell(1, 0));
        assert_eq!(expected_h_01, *h.get_cell(0, 1));
    }

    #[test]
    fn test_g_computation_with_zero_vectors() {
        // All-zero witnesses must produce an all-zero g matrix.
        let witness_vector = vec![RqVector::zero(100); 50];
        let g = compute_g(&witness_vector);

        for row in g.get_elements() {
            for cell in row.get_elements() {
                assert_eq!(*cell, Rq::zero());
            }
        }
    }

    #[test]
    fn test_h_computation_with_zero_vectors() {
        // All-zero witnesses and φ must produce an all-zero h matrix.
        let witness_vector = vec![RqVector::zero(100); 50];
        let phi = vec![RqVector::zero(100); 50];
        let h = compute_h(&witness_vector, &phi);

        for row in h.get_elements() {
            for cell in row.get_elements() {
                assert_eq!(*cell, Rq::zero());
            }
        }
    }

    // NOTE(review): removed stale commented-out tests for the old hierarchical
    // commitment API (commit_recursive, decompose_polynomial, parameter
    // validation, etc.) — they referenced types that no longer exist in this
    // module (PolyRing, GarbageParameters, TEST_D/TEST_N, AjtaiCommitment).
    // Retrieve them from version-control history if that API is revived.
}
679}