#
# Factorization of sparse bivariate polynomials for Maple 12
#
# File: convex-dense_bivariate_factorization.mpl
# Author: Jérémy Berthomieu
# Address: LIX, École polytechnique, Route de Saclay,
#          91128 Palaiseau Cedex, France
# Date: October 2010
# Licence: GPL v2
#
# Example:
# convex_dense_factor (x^10 + y*x^10 + 1, x, y)

with (LinearAlgebra):
with (simplex):

kernelopts(assertlevel=1);

# Support of p in x and y: returns the sequence of exponent pairs
# [degree in x, degree in y] of the monomials of p, and assigns the
# corresponding coefficients to the output parameter c.
support := proc (p::polynom, x, y, c)::list;
  local m;
  c := [coeffs (expand (p), [x, y], m)];
  return seq ([degree (z, x), degree (z, y)], z = m);
end proc:

# Reduce the support S_arg with unimodular transformations (coordinate
# swaps and shears of amplitude 1) until no shear gains more than
# eta*d_y on the width d_x.  The composed 2x2 unimodular matrix is
# assigned to the output parameter U; the reduced support is returned.
naive_reduce := proc (S_arg, eta, U)
  local o_x, o_y, d_x, d_y, S, lambda, inverse_lambda, swap_matrix,
        swap_counter, tmp, b, d, f, h;
  # Translate the support so that it touches both axes; d_x and d_y are
  # the partial degrees of the translated support.
  o_x := min (seq (z[1], z in S_arg));
  o_y := min (seq (z[2], z in S_arg));
  d_x := max (seq (z[1], z in S_arg)) - o_x;
  d_y := max (seq (z[2], z in S_arg)) - o_y;
  S := [seq (z - [o_x, o_y], z = S_arg)];
  lambda := Matrix (2, 2, [[1, -1], [0, 1]]);
  inverse_lambda := Matrix (2, 2, [[1, 1], [0, 1]]);
  swap_matrix := Matrix (2, 2, [[0, 1], [1, 0]]);
  U := Matrix (2, 2, [[1, 0], [0, 1]]);
  swap_counter := 0;
  while (true) do
    # Keep d_x >= d_y by swapping the two coordinates if needed.
    if (d_x < d_y) then
      swap_counter := swap_counter + 1;
      S := [seq ([z[2], z[1]], z = S)];
      U := evalm (swap_matrix &* U);
      tmp := d_x; d_x := d_y; d_y := tmp;
    end if;
    # Slacks of the support against the edges of its bounding box in
    # the slanted directions x - y and x + y.
    b := d_x - max (seq (z[1] - z[2], z = S));
    d := d_x + d_y - max (seq (z[1] + z[2], z = S));
    f := d_y + min (seq (z[1] - z[2], z = S));
    h := min (seq (z[1] + z[2], z = S));
    if (b + f > (1 + eta) * d_y) then
      # The shear lambda shrinks the width by b + f - d_y > eta*d_y.
      S := [seq (z + [d_y - f - z[2], 0], z in S)];
      U := evalm (lambda &* U);
      d_x := d_x + d_y - b - f;
    elif (d + h > (1 + eta) * d_y) then
      # The shear inverse_lambda shrinks the width by d + h - d_y.
      S := [seq (z + [-h + z[2], 0], z in S)];
      U := evalm (inverse_lambda &* U);
      d_x := d_x + d_y - d - h;
    else
      # No shear gains more than eta*d_y: the support is reduced.
      return S;
    end if;
  end do;
end proc:

# Same reduction as naive_reduce, but with shears of amplitude M = 2^m,
# where m decreases from floor(log[2](d_x / (eta*d_y))), so that the
# number of iterations stays logarithmic.
dichotomic_reduce := proc (S_arg, eta, U)
  local o_x, o_y, d_x, d_y, S, lambda, inverse_lambda, swap_matrix,
        swap_counter, tmp, b, d, f, h, m, M, flag;
  o_x := min (seq (z[1], z in S_arg));
  o_y := min (seq (z[2], z in S_arg));
  d_x := max (seq (z[1], z in S_arg)) - o_x;
  d_y := max (seq (z[2], z in S_arg)) - o_y;
  S := [seq (z - [o_x, o_y], z = S_arg)];
  lambda := q -> Matrix (2, 2, [[1, -q], [0, 1]]);
  inverse_lambda := q -> Matrix (2, 2, [[1, q], [0, 1]]);
  swap_matrix := Matrix (2, 2, [[0, 1], [1, 0]]);
  U := Matrix (2, 2, [[1, 0], [0, 1]]);
  m := floor (log[2] (d_x / (eta * d_y)));
  swap_counter := 0;
  while (true) do
    if (d_x < d_y) then
      swap_counter := swap_counter + 1;
      S := [seq ([z[2], z[1]], z = S)];
      U := evalm (swap_matrix &* U);
      tmp := d_x; d_x := d_y; d_y := tmp;
      m := floor (log[2] (d_x / (eta * d_y)));
    end if;
    if (m < 0) then return S; end if;
    M := 2^m;
    # Slacks of the support in the directions x - M*y and x + M*y.
    b := d_x - max (seq (z[1] - M * z[2], z = S));
    d := d_x + M * d_y - max (seq (z[1] + M * z[2], z = S));
    f := M * d_y + min (seq (z[1] - M * z[2], z = S));
    h := min (seq (z[1] + M * z[2], z = S));
    if (b + f > M * (1 + eta) * d_y) then
      S := [seq (z + [M*(d_y - z[2]) - f, 0], z in S)];
      U := evalm (lambda(M) &* U);
      d_x := d_x + M*d_y - b - f;
    elif (d + h > M * (1 + eta) * d_y) then
      S := [seq (z + [M*z[2] - h, 0], z in S)];
      U := evalm (inverse_lambda(M) &* U);
      d_x := d_x + M*d_y - d - h;
    end if;
    m := m - 1;
  end do;
end proc:

# Apply the unimodular matrix U to the exponent vectors of f and
# translate the result so that the new exponents are nonnegative.
support_apply_and_normalize := proc (U, f, x, y)
  local c, z, S, o_x, o_y, Z;
  Z := [support (f, x, y, c)];
  S := [seq (evalm (U &* z), z = Z)];
  o_x := min (seq (z[1], z in S));
  o_y := min (seq (z[2], z in S));
  S := [seq ([z[1], z[2]] - [o_x, o_y], z in S)];
  add (c[i] * x^S[i][1] * y^S[i][2], i = 1..nops(S));
end proc:

# Main routine: reduce the Newton polygon of f with a unimodular change
# of monomial basis, factor the reduced (denser) polynomial, then map
# each factor back with U^(-1) and restore the monomial content.
convex_dense_factor := proc (f::polynom, x, y)
  local c, S, g, v, h;
  S := convexhull ([support (f, x, y, c)]);
  naive_reduce (S, 0, 'U');
  # dichotomic_reduce (S, 1/4, 'U');
  g := support_apply_and_normalize (U, f, x, y);
  # print ("partial degrees ", degree (f, x), degree (f, y),
  #        " reduced to ", degree (g, x), degree (g, y));
  v := factor (g);
  # For bench purposes
  forget (factor, g);
  if not type (v, `*`) then return f end if;
  # print (U);
  h := mul (support_apply_and_normalize (U^(-1), z, x, y), z = v);
  return (x^(degree (f, x) - degree (h, x))
          * y^(degree (f, y) - degree (h, y)) * h);
end proc:

# Benchmark input: the expanded product of three dense polynomials of
# degree about d each.
sample_pol := proc (d)
  local P, F, G, H, i, roll, r;
  F := x^(d + 1);
  G := y^(d + 1);
  H := x^(floor (d/2)-1)*y^(floor (d/2)-1);
  for i from 0 to d do
    F := F + i*x^i*y^(d-i);
    G := G + (d-i)*x^i*y^(d-i);
    H := H + x^i*y^(d-i);
  end do;
  P := expand (F*G*H);
  return P;
  # lambda := Matrix (2, 2, [[1, -1], [0, 1]]);
  # inverse_lambda := Matrix (2, 2, [[1, 1], [0, 1]]);
  # swap_matrix := Matrix (2, 2, [[0, 1], [1, 0]]);
  # U := evalm ((lambda &* swap_matrix)^d);
  # return support_apply_and_normalize (U, P, x, y);
end proc:

# Run convex_dense_factor on sample_pol (d) repeatedly for at least
# 3 seconds and return the elapsed time divided by the number of runs.
bench := proc (d)
  local st, P, F, count;
  count := 0;
  st := time ();
  P := sample_pol (d);
  while ((time () - st) < 3) do
    # print (P);
    # factor (P);
    forget (factor, P);
    F := convex_dense_factor (P, x, y);
    ASSERT (expand (F - P) = 0, "bug");
    count := count + 1;
  end do;
  st := time () - st;
  return st / count;
end proc:

# Benchmark for degrees 8, 16, 32, 64, 128.
for i from 3 to 7 do
  print ("degree", 2^i, bench (2^i), "seconds");
end do;

# trace (convex_dense_factor):
# trace (support_apply_and_normalize):
# trace (naive_reduce):
# trace (dichotomic_reduce):
# trace (support):

# Check that convex_dense_factor returns a product equal to its input.
test_convex_dense_factor := proc (P::polynom, x, y)
  local F;
  F := convex_dense_factor (P, x, y);
  ASSERT (expand (F - P) = 0, "bug");
end proc:

# test_convex_dense_factor (expand (2*y*(1+x*y+x^5*y^2)*(x^8-y^8)), x, y);
# convex_dense_factor (expand (3*x^20*(y^10*x^10 + 1 + x) * (x - y)*(x^5 + y)),
#                      x, y);
# convex_dense_factor (expand (x^144*y^213 + x^115*y^170 + x^261*y^386), x, y);
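# A further usage sketch (not part of the original test list): it simply
# mirrors the Example given in the header comment and uses only procedures
# defined in this file.  When the reduced polynomial does not split,
# convex_dense_factor returns its argument unchanged.
# convex_dense_factor (x^10 + y*x^10 + 1, x, y);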