labrador/core/garbage_polynomials.rs
1use crate::ring::{rq_matrix::RqMatrix, rq_vector::RqVector};
2
/// Container for the "garbage" polynomials derived from a set of witness vectors.
///
/// `g` and `h` start empty and are populated by `compute_g` / `compute_h`.
pub struct GarbagePolynomials {
    // Witness vectors s_1..s_r whose pairwise inner products are taken below.
    witness_vector: Vec<RqVector>,
    // Lower triangle of the symmetric matrix g_{ij} = <s_i, s_j>; see `compute_g`.
    pub g: RqMatrix,
    // Lower triangle of h'_{ij} = <φ_i, s_j> + <φ_j, s_i>; see `compute_h`.
    pub h: RqMatrix,
}
8
9impl GarbagePolynomials {
10 pub fn new(witness_vector: Vec<RqVector>) -> Self {
11 Self {
12 witness_vector,
13 g: RqMatrix::new(Vec::new()),
14 h: RqMatrix::new(Vec::new()),
15 }
16 }
17
18 /// Calculate the garbage polynomials g_{ij} = <s_i, s_j>
19 /// Exploits symmetry by only calculating for i ≤ j since g_{ij} = g_{ji}
20 pub fn compute_g(&mut self) {
21 let mut g_i = Vec::new();
22 for i in 0..self.witness_vector.len() {
23 let mut g_ij = Vec::new();
24 for j in 0..=i {
25 // Only calculate for j ≤ i (upper triangular)
26 g_ij.push(&self.witness_vector[i] * &self.witness_vector[j]);
27 }
28 g_i.push(RqVector::new(g_ij));
29 }
30 self.g = RqMatrix::new(g_i);
31 }
32
33 /// Calculate the h_{ij} = <φ_i, s_j> + <φ_j, s_i> garbage polynomials
34 /// In the paper, h_{ij} is defined with a factor of 1/2 in front
35 /// However, since we're using q = 2^32, division by 2 is problematic in Z_q
36 /// So we store h'_{ij} = 2*h_{ij} = <φ_i, s_j> + <φ_j, s_i> directly
37 /// Exploits symmetry by only calculating for i ≤ j since h_{ij} = h_{ji}
38 pub fn compute_h(&mut self, phi: &[RqVector]) {
39 let r = self.witness_vector.len();
40 let mut h_i = Vec::with_capacity((r * (r + 1)) / 2);
41
42 for i in 0..r {
43 let mut h_ij = Vec::new();
44 for j in 0..=i {
45 // Only calculate for j ≤ i (upper triangular)
46 let inner_phi_i_s_j = phi[i].inner_product_poly_vector(&self.witness_vector[j]);
47 let inner_phi_j_s_i = phi[j].inner_product_poly_vector(&self.witness_vector[i]);
48 h_ij.push(inner_phi_i_s_j + inner_phi_j_s_i);
49 }
50 h_i.push(RqVector::new(h_ij));
51 }
52 self.h = RqMatrix::new(h_i);
53 }
54}
55
56// Todo: Revise and complete the following
57// Implementation of the final level optimization (Section 5.6)
58// /// Uses sequentially derived challenges via Fiat-Shamir to simulate the interactive protocol
59// pub fn optimize_final_level(
60// witnesses: &[PolyVector],
61// phi: &[PolyVector],
62// initial_seed: u64,
63// ) -> (PolyRing, Vec<PolyRing>, Vec<PolyRing>) {
64// let r = witnesses.len();
65
66// // Calculate g_0 = Σ_i <s_i, s_i> (diagonal sum)
67// let g0 = (0..r)
68// .map(|i| witnesses[i].inner_product_poly_vector(&witnesses[i]))
69// .fold(
70// PolyRing::zero(witnesses[0].get_elements()[0].len()),
71// |acc, g| &acc + &g,
72// );
73
74// // Generate sequence of challenges using Fiat-Shamir
75// let mut challenges = Vec::with_capacity(r / 2);
76// let mut hasher = DefaultHasher::new();
77// initial_seed.hash(&mut hasher);
78// let mut current_seed = hasher.finish();
79
80// for _ in 0..r / 2 {
81// let mut rng = rand::rngs::StdRng::seed_from_u64(current_seed);
82// let challenge = PolyRing::random(&mut rng, witnesses[0].get_elements()[0].len());
83// challenges.push(challenge);
84
85// // Update seed for next challenge
86// let mut hasher = DefaultHasher::new();
87// current_seed.hash(&mut hasher);
88// current_seed = hasher.finish();
89// }
90
91// // Calculate selected g terms: g_{2i-1} and g_{2i}
92// let mut g_terms = Vec::new();
93// for i in 1..=r / 2 {
94// let idx1 = 2 * i - 2;
95// let idx2 = 2 * i - 1;
96
97// // Use unique challenge for each i
98// let challenge = &challenges[i - 1];
99
100// // Add g_{2i-1}
101// if idx2 < r {
102// // For g_{2i-1} = <s_{2i-2}, c_i * s_{2i-1}>
103// let s_j_scaled = witnesses[idx2]
104// .iter()
105// .map(|p| p * challenge)
106// .collect::<PolyVector>();
107
108// let g_2i_1 = witnesses[idx1].inner_product_poly_vector(&s_j_scaled);
109// g_terms.push(g_2i_1);
110
111// // Add g_{2i} if we have enough witnesses
112// if 2 * i < r {
113// // For g_{2i} = <s_{2i-1}, c_i * s_{2i}>
114// let s_j_scaled = witnesses[2 * i]
115// .iter()
116// .map(|p| p * challenge)
117// .collect::<PolyVector>();
118
119// let g_2i = witnesses[idx2].inner_product_poly_vector(&s_j_scaled);
120// g_terms.push(g_2i);
121// }
122// }
123// }
124
125// // Calculate selected h terms: h_{2i-1} and h_{2i}
126// let mut h_terms = Vec::new();
127// for i in 1..=r / 2 {
128// let idx1 = 2 * i - 2;
129// let idx2 = 2 * i - 1;
130
131// // Use unique challenge for each i
132// let challenge = &challenges[i - 1];
133
134// // Add h_{2i-1}
135// if idx2 < r {
136// // For h_{2i-1} = <φ_{2i-2}, c_i * s_{2i-1}> + <φ_{2i-1}, c_i * s_{2i-2}>
137// let s_j_scaled = witnesses[idx2]
138// .iter()
139// .map(|p| p * challenge)
140// .collect::<PolyVector>();
141
142// let phi_i_s_j = phi[idx1].inner_product_poly_vector(&s_j_scaled);
143
144// let s_i_scaled = witnesses[idx1]
145// .iter()
146// .map(|p| p * challenge)
147// .collect::<PolyVector>();
148
149// let phi_j_s_i = phi[idx2].inner_product_poly_vector(&s_i_scaled);
150
151// let h_2i_1 = &phi_i_s_j + &phi_j_s_i;
152// h_terms.push(h_2i_1);
153
154// // Add h_{2i} if we have enough witnesses
155// if 2 * i < r {
156// // For h_{2i} = <φ_{2i-1}, c_i * s_{2i}> + <φ_{2i}, c_i * s_{2i-1}>
157// let s_j_scaled = witnesses[2 * i]
158// .iter()
159// .map(|p| p * challenge)
160// .collect::<PolyVector>();
161
162// let phi_i_s_j = phi[idx2].inner_product_poly_vector(&s_j_scaled);
163
164// let s_i_scaled = witnesses[idx2]
165// .iter()
166// .map(|p| p * challenge)
167// .collect::<PolyVector>();
168
169// let phi_j_s_i = phi[2 * i].inner_product_poly_vector(&s_i_scaled);
170
171// let h_2i = &phi_i_s_j + &phi_j_s_i;
172// h_terms.push(h_2i);
173// }
174// }
175// }
176
177// (g0, g_terms, h_terms)
178// }
179
180#[cfg(test)]
181mod tests {
182 use super::*;
183 use rand::rng;
184
185 const RANK: usize = 8;
186
187 fn create_test_witnesses(count: usize) -> (Vec<RqVector>, Vec<RqVector>) {
188 let witnesses = (0..count)
189 .map(|_| RqVector::random(&mut rng(), RANK))
190 .collect();
191
192 let phi = (0..count)
193 .map(|_| RqVector::random(&mut rng(), RANK))
194 .collect();
195
196 (witnesses, phi)
197 }
198
199 #[test]
200 fn test_g_matrix_size() {
201 let multiplicity = 3;
202 let (witnesses, _) = create_test_witnesses(multiplicity);
203 let mut garbage_polynomial = GarbagePolynomials::new(witnesses.clone());
204 garbage_polynomial.compute_g();
205
206 assert_eq!(garbage_polynomial.g.get_row_len(), 3);
207 // Assert that g stores half of the matrix
208 for row in 0..multiplicity {
209 assert_eq!(
210 garbage_polynomial.g.get_elements()[row].get_length(),
211 row + 1
212 );
213 }
214 }
215
216 #[test]
217 fn test_g_calculation() {
218 let (witnesses, _) = create_test_witnesses(3);
219
220 let mut garbage_polynomial = GarbagePolynomials::new(witnesses.clone());
221 garbage_polynomial.compute_g();
222
223 // Verify a few specific values
224 let expected_g_01 = witnesses[0].inner_product_poly_vector(&witnesses[1]);
225 let expected_g_10 = witnesses[1].inner_product_poly_vector(&witnesses[0]);
226 assert_eq!(expected_g_01, expected_g_10);
227
228 let expected_g_22 = witnesses[2].inner_product_poly_vector(&witnesses[2]);
229
230 assert_eq!(garbage_polynomial.g.get_cell_symmetric(0, 1), expected_g_01);
231 assert_eq!(garbage_polynomial.g.get_cell_symmetric(1, 0), expected_g_10);
232 assert_eq!(garbage_polynomial.g.get_cell_symmetric(2, 2), expected_g_22);
233 }
234
235 #[test]
236 fn test_h_matrix_size() {
237 let multiplicity = 3;
238 let (witnesses, phi) = create_test_witnesses(multiplicity);
239 let mut garbage_polynomial = GarbagePolynomials::new(witnesses.clone());
240 garbage_polynomial.compute_h(&phi);
241
242 assert_eq!(garbage_polynomial.h.get_row_len(), 3);
243 // Assert that g stores half of the matrix
244 for row in 0..multiplicity {
245 assert_eq!(
246 garbage_polynomial.h.get_elements()[row].get_length(),
247 row + 1
248 );
249 }
250 }
251
252 #[test]
253 fn test_h_calculation() {
254 let (witnesses, phi) = create_test_witnesses(3);
255 let mut garbage_polynomial = GarbagePolynomials::new(witnesses.clone());
256 garbage_polynomial.compute_h(&phi);
257
258 // Verify a specific value
259 let phi_0_s_1 = phi[0].inner_product_poly_vector(&witnesses[1]);
260 let phi_1_s_0 = phi[1].inner_product_poly_vector(&witnesses[0]);
261 let expected_h_01 = phi_0_s_1 + phi_1_s_0;
262
263 assert_eq!(
264 garbage_polynomial.h.get_cell_symmetric(0, 1),
265 garbage_polynomial.h.get_cell_symmetric(1, 0)
266 );
267 assert_eq!(expected_h_01, garbage_polynomial.h.get_cell_symmetric(0, 1));
268 }
269
270 // #[test]
271 // fn test_commit_recursive() {
272 // let commitment_scheme = create_test_commitment();
273 // let (witnesses, phi) = create_test_witnesses(3);
274
275 // // Create mock inner commitment parts (t and z)
276 // let t_parts = create_test_parts(5);
277 // let z_parts = create_test_parts(5);
278
279 // let (recursive_commitment, recursive_witness) = commitment_scheme
280 // .commit_recursive(&witnesses, &phi, &t_parts, &z_parts)
281 // .unwrap();
282
283 // // Check the output structure
284 // assert!(!recursive_commitment.nu1.as_slice().is_empty());
285 // assert!(!recursive_commitment.nu2.as_slice().is_empty());
286
287 // // Check witness has appropriate parts
288 // assert_eq!(recursive_witness.t_parts.len(), t_parts.len());
289 // assert_eq!(recursive_witness.z_parts.len(), z_parts.len());
290 // assert!(!recursive_witness.g_parts.is_empty());
291 // assert!(!recursive_witness.h_parts.is_empty());
292 // }
293
294 // #[test]
295 // fn test_commit_recursive_correctness() {
296 // let commitment_scheme = create_test_commitment();
297 // let (witnesses, phi) = create_test_witnesses(3);
298
299 // // Create test parts for inner commitment
300 // let t_parts = create_test_parts(5);
301 // let z_parts = create_test_parts(5);
302
303 // // Get the recursive commitment and witness
304 // let (recursive_commitment, recursive_witness) = commitment_scheme
305 // .commit_recursive(&witnesses, &phi, &t_parts, &z_parts)
306 // .unwrap();
307
308 // // Manually compute the expected commitments:
309
310 // // 1. For nu1, combine t_parts and g_parts
311 // let mut combined_parts = recursive_witness.t_parts.clone();
312 // combined_parts.extend(recursive_witness.g_parts.clone());
313
314 // // 2. Create expected witnesses
315 // let expected_nu1_witness = commitment_scheme.create_witness_from_parts(&combined_parts);
316 // let expected_nu2_witness =
317 // commitment_scheme.create_witness_from_parts(&recursive_witness.h_parts);
318
319 // // 3. Create AjtaiCommitment instances with the correct matrices
320 // let nu1_commitment = AjtaiCommitment::new(
321 // commitment_scheme.params.clone(),
322 // commitment_scheme.nu1_matrix.clone(),
323 // )
324 // .unwrap();
325
326 // let nu2_commitment = AjtaiCommitment::new(
327 // commitment_scheme.params.clone(),
328 // commitment_scheme.nu2_matrix.clone(),
329 // )
330 // .unwrap();
331
332 // // 4. Compute expected commitments
333 // let (expected_nu1, _) = nu1_commitment.commit(expected_nu1_witness).unwrap();
334 // let (expected_nu2, _) = nu2_commitment.commit(expected_nu2_witness).unwrap();
335
336 // // 5. Compare expected with actual
337 // assert_eq!(
338 // recursive_commitment.nu1, expected_nu1,
339 // "nu1 commitment does not match expected value"
340 // );
341 // assert_eq!(
342 // recursive_commitment.nu2, expected_nu2,
343 // "nu2 commitment does not match expected value"
344 // );
345 // }
346
347 // #[test]
348 // fn test_decomposition_reconstruction() {
349 // let commitment_scheme = create_test_commitment();
350 // let mut rng = rand::rng();
351
352 // // Create a random polynomial
353 // let original_poly = PolyRing::random(&mut rng, TEST_D);
354 // let original_rq: Rq<TEST_D> = original_poly.clone().into();
355
356 // // Test g decomposition parameters
357 // let g_parts = commitment_scheme.decompose_polynomial(&original_poly, true);
358 // let g_base = commitment_scheme.g_decomp_params.base();
359
360 // // Reconstruct the polynomial from parts
361 // let mut reconstructed_g = Rq::<TEST_D>::zero();
362 // let mut current_base_power = Zq::ONE; // Base^0
363
364 // for part in &g_parts {
365 // // Add part * base^k
366 // reconstructed_g = reconstructed_g.clone() + part.clone().scalar_mul(current_base_power);
367 // // Multiply by base for next iteration
368 // current_base_power *= g_base;
369 // }
370
371 // assert_eq!(
372 // reconstructed_g, original_rq,
373 // "G decomposition reconstruction failed"
374 // );
375
376 // // Test h decomposition parameters
377 // let h_parts = commitment_scheme.decompose_polynomial(&original_poly, false);
378 // let h_base = commitment_scheme.h_decomp_params.base();
379
380 // // Reconstruct the polynomial from parts
381 // let mut reconstructed_h = Rq::<TEST_D>::zero();
382 // let mut current_base_power = Zq::ONE; // Base^0
383
384 // for part in &h_parts {
385 // // Add part * base^k
386 // reconstructed_h = reconstructed_h.clone() + part.clone().scalar_mul(current_base_power);
387 // // Multiply by base for next iteration
388 // current_base_power *= h_base;
389 // }
390
391 // assert_eq!(
392 // reconstructed_h, original_rq,
393 // "H decomposition reconstruction failed"
394 // );
395 // }
396
397 // #[test]
398 // fn test_create_witness_from_parts_edge_cases() {
399 // let commitment_scheme = create_test_commitment();
400 // let mut rng = rand::rng();
401
402 // // Test case 1: parts.len() < N (should pad with zeros)
403 // let few_parts: Vec<Rq<TEST_D>> = (0..TEST_N - 2)
404 // .map(|_| Rq::<TEST_D>::random(&mut rng))
405 // .collect();
406
407 // let witness_few = commitment_scheme.create_witness_from_parts(&few_parts);
408
409 // // Check length is exactly N
410 // assert_eq!(witness_few.as_slice().len(), TEST_N);
411
412 // // Check that last elements are zero
413 // for i in few_parts.len()..TEST_N {
414 // assert_eq!(witness_few[i], Rq::<TEST_D>::zero());
415 // }
416
417 // // Check original parts are preserved (with possible bounding applied)
418 // let witness_bound = commitment_scheme.params.witness_bound();
419 // for (i, part) in few_parts.iter().enumerate() {
420 // let mut bounded_part_coeffs = [Zq::ZERO; TEST_D];
421 // for (j, coeff) in part.get_coefficients().iter().enumerate().take(TEST_D) {
422 // bounded_part_coeffs[j] = coeff.centered_mod(witness_bound);
423 // }
424 // let bounded_part = Rq::<TEST_D>::new(bounded_part_coeffs);
425 // assert_eq!(witness_few[i], bounded_part);
426 // }
427
428 // // Test case 2: parts.len() > N (should truncate to first N)
429 // let many_parts: Vec<Rq<TEST_D>> = (0..TEST_N + 3)
430 // .map(|_| Rq::<TEST_D>::random(&mut rng))
431 // .collect();
432
433 // let witness_many = commitment_scheme.create_witness_from_parts(&many_parts);
434
435 // // Check length is exactly N
436 // assert_eq!(witness_many.as_slice().len(), TEST_N);
437
438 // // Check only first N parts are included (with possible bounding applied)
439 // let witness_bound = commitment_scheme.params.witness_bound();
440 // for i in 0..TEST_N {
441 // let mut bounded_part_coeffs = [Zq::ZERO; TEST_D];
442 // for (j, coeff) in many_parts[i]
443 // .get_coefficients()
444 // .iter()
445 // .enumerate()
446 // .take(TEST_D)
447 // {
448 // bounded_part_coeffs[j] = coeff.centered_mod(witness_bound);
449 // }
450 // let bounded_part = Rq::<TEST_D>::new(bounded_part_coeffs);
451 // assert_eq!(witness_many[i], bounded_part);
452 // }
453
454 // // Test case 3: Coefficient bounding
455 // // Create parts with large coefficients
456 // let large_coeff_parts: Vec<Rq<TEST_D>> = (0..TEST_N)
457 // .map(|_| {
458 // let mut large_part = Rq::<TEST_D>::random(&mut rng);
459 // // Set first coefficient to a large value beyond the witness bound
460 // if let Some(c) = large_part.iter_mut().next() {
461 // *c = Zq::MAX - Zq::ONE;
462 // }
463 // large_part
464 // })
465 // .collect();
466
467 // let witness_large = commitment_scheme.create_witness_from_parts(&large_coeff_parts);
468
469 // // Check coefficients are properly bounded
470 // for i in 0..TEST_N {
471 // let mut bounded_part_coeffs = [Zq::ZERO; TEST_D];
472 // for (j, coeff) in large_coeff_parts[i]
473 // .get_coefficients()
474 // .iter()
475 // .enumerate()
476 // .take(TEST_D)
477 // {
478 // bounded_part_coeffs[j] = coeff.centered_mod(witness_bound);
479 // }
480 // let bounded_part = Rq::<TEST_D>::new(bounded_part_coeffs);
481
482 // assert_eq!(witness_large[i], bounded_part);
483
484 // // Also check that large coefficients were properly bounded
485 // for (j, coeff) in witness_large[i].get_coefficients().iter().enumerate() {
486 // let original_coeff = large_coeff_parts[i].get_coefficients()[j];
487 // if original_coeff > witness_bound {
488 // assert!(*coeff <= witness_bound);
489 // }
490 // }
491 // }
492 // }
493
494 // #[test]
495 // #[allow(clippy::as_conversions)]
496 // fn test_optimal_parameters_accuracy() {
497 // // Choose small, simple values for manual calculation
498 // let n = 4; // Small dimension
499 // let d = 4; // Small degree
500 // let s = 2.0; // Simple standard deviation
501 // let beta = 1.0; // Simple beta value
502
503 // // Manually calculate the expected values according to the paper's formulas
504 // let n_d_sqrt = (n * d) as f64; // = 4.0
505
506 // // s_g = sqrt(n*d) * s^2 = 4.0 * 4.0 = 16.0
507 // let s_g = n_d_sqrt * s * s;
508
509 // // s_h = beta * sqrt(n*d) * s = 1.0 * 4.0 * 2.0 = 8.0
510 // let s_h = beta * n_d_sqrt * s;
511
512 // // b2 ≈ sqrt(12 * s_g) = sqrt(12 * 16.0) = sqrt(192) ≈ 13.856... => 13
513 // let expected_b2 = (12.0 * s_g).sqrt() as u32;
514
515 // // b1 ≈ sqrt(12 * s_h) = sqrt(12 * 8.0) = sqrt(96) ≈ 9.798... => 9
516 // let expected_b1 = (12.0 * s_h).sqrt() as u32;
517
518 // // For q = 2^32:
519 // // t2 ≈ log_b2(q) = log_13(2^32) = 32/log_2(13) ≈ 32/3.7 ≈ 8.65 => 9
520 // let expected_t2 = ((32.0 / (expected_b2 as f64).log2()).ceil() as usize).max(2);
521
522 // // t1 ≈ log_b1(q) = log_9(2^32) = 32/log_2(9) ≈ 32/3.17 ≈ 10.09 => 11
523 // let expected_t1 = ((32.0 / (expected_b1 as f64).log2()).ceil() as usize).max(2);
524
525 // // Call the function under test
526 // let params =
527 // GarbagePolynomialCommitment::<TEST_M, TEST_N, TEST_D>::calculate_optimal_parameters(
528 // n, d, s, beta,
529 // );
530
531 // // Check results
532 // assert!(
533 // params.g_base.to_u128() >= expected_b2 as u128 - 1
534 // && params.g_base.to_u128() <= expected_b2 as u128 + 1,
535 // "g_base {}, expected {}",
536 // params.g_base.to_u128(),
537 // expected_b2
538 // );
539
540 // assert!(
541 // params.h_base.to_u128() >= expected_b1 as u128 - 1
542 // && params.h_base.to_u128() <= expected_b1 as u128 + 1,
543 // "h_base {}, expected {}",
544 // params.h_base.to_u128(),
545 // expected_b1
546 // );
547
548 // // For part counts, use approximate comparison due to potential floating point differences
549 // assert!(
550 // params.g_parts >= expected_t2 - 1 && params.g_parts <= expected_t2 + 1,
551 // "g_parts {}, expected {}",
552 // params.g_parts,
553 // expected_t2
554 // );
555
556 // assert!(
557 // params.h_parts >= expected_t1 - 1 && params.h_parts <= expected_t1 + 1,
558 // "h_parts {}, expected {}",
559 // params.h_parts,
560 // expected_t1
561 // );
562 // }
563
564 // #[test]
565 // fn test_error_handling() {
566 // let params = AjtaiParameters::new(Zq::ONE, Zq::ONE).unwrap();
567 // let mut rng = rand::rng();
568 // let nu1_matrix = RqMatrix::<TEST_M, TEST_N, TEST_D>::random(&mut rng);
569 // let nu2_matrix = RqMatrix::<TEST_M, TEST_N, TEST_D>::random(&mut rng);
570
571 // // Test invalid g_base (≤ 1)
572 // let invalid_g_base = GarbageParameters {
573 // g_base: Zq::ONE, // Invalid: base must be > 1
574 // g_parts: 2,
575 // h_base: Zq::new(4),
576 // h_parts: 3,
577 // };
578
579 // let result = GarbagePolynomialCommitment::<TEST_M, TEST_N, TEST_D>::new(
580 // params.clone(),
581 // nu1_matrix.clone(),
582 // nu2_matrix.clone(),
583 // invalid_g_base,
584 // );
585
586 // assert!(result.is_err(), "Should reject g_base ≤ 1");
587 // match result {
588 // Err(HierarchicalError::InvalidBase(_)) => {} // Expected error
589 // _ => panic!("Wrong error type for invalid g_base"),
590 // }
591
592 // // Test invalid g_parts (0)
593 // let invalid_g_parts = GarbageParameters {
594 // g_base: Zq::new(8),
595 // g_parts: 0, // Invalid: parts must be > 0
596 // h_base: Zq::new(4),
597 // h_parts: 3,
598 // };
599
600 // let result = GarbagePolynomialCommitment::<TEST_M, TEST_N, TEST_D>::new(
601 // params.clone(),
602 // nu1_matrix.clone(),
603 // nu2_matrix.clone(),
604 // invalid_g_parts,
605 // );
606
607 // assert!(result.is_err(), "Should reject g_parts = 0");
608 // match result {
609 // Err(HierarchicalError::InvalidPartCount(_)) => {} // Expected error
610 // _ => panic!("Wrong error type for invalid g_parts"),
611 // }
612
613 // // Test invalid h_base (≤ 1)
614 // let invalid_h_base = GarbageParameters {
615 // g_base: Zq::new(8),
616 // g_parts: 2,
617 // h_base: Zq::ZERO, // Invalid: base must be > 0
618 // h_parts: 3,
619 // };
620
621 // let result = GarbagePolynomialCommitment::<TEST_M, TEST_N, TEST_D>::new(
622 // params.clone(),
623 // nu1_matrix.clone(),
624 // nu2_matrix.clone(),
625 // invalid_h_base,
626 // );
627
628 // assert!(result.is_err(), "Should reject h_base ≤ 1");
629 // match result {
630 // Err(HierarchicalError::InvalidBase(_)) => {} // Expected error
631 // _ => panic!("Wrong error type for invalid h_base"),
632 // }
633
634 // // Test invalid h_parts (0)
635 // let invalid_h_parts = GarbageParameters {
636 // g_base: Zq::new(8),
637 // g_parts: 2,
638 // h_base: Zq::new(4),
639 // h_parts: 0, // Invalid: parts must be > 0
640 // };
641
642 // let result = GarbagePolynomialCommitment::<TEST_M, TEST_N, TEST_D>::new(
643 // params.clone(),
644 // nu1_matrix.clone(),
645 // nu2_matrix,
646 // invalid_h_parts,
647 // );
648
649 // assert!(result.is_err(), "Should reject h_parts = 0");
650 // match result {
651 // Err(HierarchicalError::InvalidPartCount(_)) => {} // Expected error
652 // _ => panic!("Wrong error type for invalid h_parts"),
653 // }
654 // }
655}