labrador/core/garbage_polynomials.rs
1use crate::ring::{rq_matrix::RqMatrix, rq_vector::RqVector};
2
3use super::inner_product;
4
5/// Calculate the garbage polynomials g_{ij} = <s_i, s_j>
6/// Exploits symmetry by only calculating for i ≤ j since g_{ij} = g_{ji}
7pub fn compute_g(witness_vector: &[RqVector]) -> RqMatrix {
8 let mut g_i = Vec::new();
9 for i in 0..witness_vector.len() {
10 let mut g_ij = Vec::new();
11 for j in 0..=i {
12 // Only calculate for j ≤ i (upper triangular)
13 g_ij.push(inner_product::compute_linear_combination(
14 witness_vector[i].elements(),
15 witness_vector[j].elements(),
16 ));
17 }
18 g_i.push(RqVector::new(g_ij));
19 }
20 RqMatrix::new(g_i, true)
21}
22
23/// Calculate the h_{ij} = <φ_i, s_j> + <φ_j, s_i> garbage polynomials
24/// In the paper, h_{ij} is defined with a factor of 1/2 in front
25/// However, since we're using q = 2^32, division by 2 is problematic in Z_q
26/// So we store h'_{ij} = 2*h_{ij} = <φ_i, s_j> + <φ_j, s_i> directly.
27/// Therefore the bound for commitment scheme of h_ij should be 4 times larger than the bound specified in the paper.
28///
29/// Exploits symmetry by only calculating for i ≤ j since h_{ij} = h_{ji}.
30pub fn compute_h(witness_vector: &[RqVector], phi: &[RqVector]) -> RqMatrix {
31 let r = witness_vector.len();
32 let mut h_i = Vec::with_capacity((r * (r + 1)) / 2);
33
34 for i in 0..r {
35 let mut h_ij = Vec::new();
36 for j in 0..=i {
37 // Only calculate for j ≤ i (upper triangular)
38 let inner_phi_i_s_j = inner_product::compute_linear_combination(
39 phi[i].elements(),
40 witness_vector[j].elements(),
41 );
42 let inner_phi_j_s_i = inner_product::compute_linear_combination(
43 phi[j].elements(),
44 witness_vector[i].elements(),
45 );
46 h_ij.push(&inner_phi_i_s_j + &inner_phi_j_s_i);
47 }
48 h_i.push(RqVector::new(h_ij));
49 }
50 RqMatrix::new(h_i, true)
51}
52
53// Todo: Revise and complete the following
54// Implementation of the final level optimization (Section 5.6)
55// /// Uses sequentially derived challenges via Fiat-Shamir to simulate the interactive protocol
56// pub fn optimize_final_level(
57// witnesses: &[PolyVector],
58// phi: &[PolyVector],
59// initial_seed: u64,
60// ) -> (PolyRing, Vec<PolyRing>, Vec<PolyRing>) {
61// let r = witnesses.len();
62
63// // Calculate g_0 = Σ_i <s_i, s_i> (diagonal sum)
64// let g0 = (0..r)
65// .map(|i| witnesses[i].inner_product_poly_vector(&witnesses[i]))
66// .fold(
67// PolyRing::zero(witnesses[0].elements()[0].len()),
68// |acc, g| &acc + &g,
69// );
70
71// // Generate sequence of challenges using Fiat-Shamir
72// let mut challenges = Vec::with_capacity(r / 2);
73// let mut hasher = DefaultHasher::new();
74// initial_seed.hash(&mut hasher);
75// let mut current_seed = hasher.finish();
76
77// for _ in 0..r / 2 {
78// let mut rng = rand::rngs::StdRng::seed_from_u64(current_seed);
79// let challenge = PolyRing::random(&mut rng, witnesses[0].elements()[0].len());
80// challenges.push(challenge);
81
82// // Update seed for next challenge
83// let mut hasher = DefaultHasher::new();
84// current_seed.hash(&mut hasher);
85// current_seed = hasher.finish();
86// }
87
88// // Calculate selected g terms: g_{2i-1} and g_{2i}
89// let mut g_terms = Vec::new();
90// for i in 1..=r / 2 {
91// let idx1 = 2 * i - 2;
92// let idx2 = 2 * i - 1;
93
94// // Use unique challenge for each i
95// let challenge = &challenges[i - 1];
96
97// // Add g_{2i-1}
98// if idx2 < r {
99// // For g_{2i-1} = <s_{2i-2}, c_i * s_{2i-1}>
100// let s_j_scaled = witnesses[idx2]
101// .iter()
102// .map(|p| p * challenge)
103// .collect::<PolyVector>();
104
105// let g_2i_1 = witnesses[idx1].inner_product_poly_vector(&s_j_scaled);
106// g_terms.push(g_2i_1);
107
108// // Add g_{2i} if we have enough witnesses
109// if 2 * i < r {
110// // For g_{2i} = <s_{2i-1}, c_i * s_{2i}>
111// let s_j_scaled = witnesses[2 * i]
112// .iter()
113// .map(|p| p * challenge)
114// .collect::<PolyVector>();
115
116// let g_2i = witnesses[idx2].inner_product_poly_vector(&s_j_scaled);
117// g_terms.push(g_2i);
118// }
119// }
120// }
121
122// // Calculate selected h terms: h_{2i-1} and h_{2i}
123// let mut h_terms = Vec::new();
124// for i in 1..=r / 2 {
125// let idx1 = 2 * i - 2;
126// let idx2 = 2 * i - 1;
127
128// // Use unique challenge for each i
129// let challenge = &challenges[i - 1];
130
131// // Add h_{2i-1}
132// if idx2 < r {
133// // For h_{2i-1} = <φ_{2i-2}, c_i * s_{2i-1}> + <φ_{2i-1}, c_i * s_{2i-2}>
134// let s_j_scaled = witnesses[idx2]
135// .iter()
136// .map(|p| p * challenge)
137// .collect::<PolyVector>();
138
139// let phi_i_s_j = phi[idx1].inner_product_poly_vector(&s_j_scaled);
140
141// let s_i_scaled = witnesses[idx1]
142// .iter()
143// .map(|p| p * challenge)
144// .collect::<PolyVector>();
145
146// let phi_j_s_i = phi[idx2].inner_product_poly_vector(&s_i_scaled);
147
148// let h_2i_1 = &phi_i_s_j + &phi_j_s_i;
149// h_terms.push(h_2i_1);
150
151// // Add h_{2i} if we have enough witnesses
152// if 2 * i < r {
153// // For h_{2i} = <φ_{2i-1}, c_i * s_{2i}> + <φ_{2i}, c_i * s_{2i-1}>
154// let s_j_scaled = witnesses[2 * i]
155// .iter()
156// .map(|p| p * challenge)
157// .collect::<PolyVector>();
158
159// let phi_i_s_j = phi[idx2].inner_product_poly_vector(&s_j_scaled);
160
161// let s_i_scaled = witnesses[idx2]
162// .iter()
163// .map(|p| p * challenge)
164// .collect::<PolyVector>();
165
166// let phi_j_s_i = phi[2 * i].inner_product_poly_vector(&s_i_scaled);
167
168// let h_2i = &phi_i_s_j + &phi_j_s_i;
169// h_terms.push(h_2i);
170// }
171// }
172// }
173
174// (g0, g_terms, h_terms)
175// }
176
#[cfg(test)]
mod tests {
    use crate::ring::rq::Rq;

    use super::*;
    use rand::rng;

    /// Number of polynomials in each randomly generated test vector.
    const RANK: usize = 8;

    /// Builds `count` random witness vectors s_i and `count` random φ vectors.
    fn create_test_witnesses(count: usize) -> (Vec<RqVector>, Vec<RqVector>) {
        let witnesses = (0..count)
            .map(|_| RqVector::random(&mut rng(), RANK))
            .collect();

        let phi = (0..count)
            .map(|_| RqVector::random(&mut rng(), RANK))
            .collect();

        (witnesses, phi)
    }

    #[test]
    fn test_g_matrix_size() {
        let multiplicity = 3;
        let (witnesses, _) = create_test_witnesses(multiplicity);
        let g = compute_g(&witnesses);

        assert_eq!(g.row_len(), 3);
        // g stores only half the matrix: row i holds exactly i + 1 cells.
        for row in 0..multiplicity {
            assert_eq!(g.elements()[row].len(), row + 1);
        }
    }

    #[test]
    fn test_g_calculation() {
        let (witnesses, _) = create_test_witnesses(3);

        let g = compute_g(&witnesses);

        // g_{01} and g_{10} must agree: the inner product is symmetric.
        let expected_g_01 = inner_product::compute_linear_combination(
            witnesses[0].elements(),
            witnesses[1].elements(),
        );
        let expected_g_10 = inner_product::compute_linear_combination(
            witnesses[1].elements(),
            witnesses[0].elements(),
        );
        assert_eq!(expected_g_01, expected_g_10);

        // Diagonal entry g_{22} = <s_2, s_2>.
        let expected_g_22 = inner_product::compute_linear_combination(
            witnesses[2].elements(),
            witnesses[2].elements(),
        );

        assert_eq!(*g.get_cell(0, 1), expected_g_01);
        assert_eq!(*g.get_cell(1, 0), expected_g_10);
        assert_eq!(*g.get_cell(2, 2), expected_g_22);
    }

    #[test]
    fn test_h_matrix_size() {
        let multiplicity = 3;
        let (witnesses, phi) = create_test_witnesses(multiplicity);
        let h = compute_h(&witnesses, &phi);

        assert_eq!(h.row_len(), 3);
        // h stores only half the matrix: row i holds exactly i + 1 cells.
        for row in 0..multiplicity {
            assert_eq!(h.elements()[row].len(), row + 1);
        }
    }

    #[test]
    fn test_h_calculation() {
        let (witnesses, phi) = create_test_witnesses(3);
        let h = compute_h(&witnesses, &phi);

        // h'_{01} = <φ_0, s_1> + <φ_1, s_0>, and the matrix must be symmetric.
        let phi_0_s_1 =
            inner_product::compute_linear_combination(phi[0].elements(), witnesses[1].elements());
        let phi_1_s_0 =
            inner_product::compute_linear_combination(phi[1].elements(), witnesses[0].elements());
        let expected_h_01 = &phi_0_s_1 + &phi_1_s_0;

        assert_eq!(h.get_cell(0, 1), h.get_cell(1, 0));
        assert_eq!(expected_h_01, *h.get_cell(0, 1));
    }

    #[test]
    fn test_g_computation_with_zero_vectors() {
        // All-zero witnesses must produce an all-zero g matrix.
        let witness_vector = vec![RqVector::zero(100); 50];
        let g = compute_g(&witness_vector);

        for row in g.elements() {
            for cell in row.elements() {
                assert_eq!(*cell, Rq::zero());
            }
        }
    }

    #[test]
    fn test_h_computation_with_zero_vectors() {
        // All-zero witnesses and φ must produce an all-zero h matrix.
        let witness_vector = vec![RqVector::zero(100); 50];
        let phi = vec![RqVector::zero(100); 50];
        let h = compute_h(&witness_vector, &phi);

        for row in h.elements() {
            for cell in row.elements() {
                assert_eq!(*cell, Rq::zero());
            }
        }
    }
}