from util import (
    fcidump_to_integrals,
    norm1_paulisum,
    paulisum_hamil_coeffs,
    h1e_factorized,
    h2e_cholesky,
    factorized_to_standard_integrals,
    opnorm_cholesky_vec,
    h1e_1norm_cancellation,
    scientific_superscript,
)
import numpy as np
from scipy.special import binom
import scipy
import pandas
import matplotlib.pyplot as plt


def num_trotterstage(order: int):
    """
    Compute the number of stages of the kth order Trotter formula.

    :arg order: Trotter order; must be 1 or an even integer
    :return: number of stages (1 for first order, 2 * 5^(order/2 - 1) otherwise)
    :rtype: int
    """
    assert order == 1 or order % 2 == 0
    if order == 1:
        return 1
    else:
        return int(2 * 5 ** (order // 2 - 1))


def optimize_partition(
    epsilon: float,
    c_trotter: float,
    order: int,
    norms_remainder: np.ndarray,
    cost_trotterstage_det: np.ndarray,
    cost_paulirot_random: np.ndarray | float,
    xi: float = 0.1 * np.pi,
):
    """
    Optimizes the cost of the partially randomized method.

    The nth entries of norms_remainder and cost_trotterstage_det correspond to the
    nth partitioning H_tot = H_det + H_rem. The remainder is then decomposed as a
    sum of Paulis which are implemented randomly.

    :arg epsilon: the precision
    :arg c_trotter: the prefactor C_trotter of the trotter error C_trotter * delta^alpha
    :arg order: the order of the Trotter method to use
    :arg norms_remainder: 1-norm of the Pauli decomposition of H_rem
    :arg cost_trotterstage_det: cost to implement a single trotterstage of H_det
    :arg cost_paulirot_random: cost to implement a single Pauli-rotation sampled
        from H_rem. If it varies for different decompositions, an array of costs
        can be used as input.
    :arg xi: parameter for depth-reduction
    :return: dictionary with (at least) the index of the optimal decomposition
        ("num_terms_det"), its total cost ("total_cost"), the optimized Trotter
        step "delta", "eps_qpe", "max_cost", and per-partition cost arrays
        ("cost_det_all_partitions", "cost_rand_all_partitions", ...)
    :rtype: dict
    """
    if xi > 0.1 * np.pi or xi < 0:
        raise ValueError("xi must be positive and smaller 0.1*np.pi (ask Freek why)")
    # broadcast a scalar per-rotation cost to one entry per partition
    if not hasattr(cost_paulirot_random, "__iter__"):
        cost_paulirot_random = np.ones_like(norms_remainder) * cost_paulirot_random
    res = {
        "num_terms_det": None,
        "total_cost": None,
        "kappa": None,  # NOTE(review): never filled; "kappa_all_partitions" is set instead
        "delta": None,
        "eps_qpe": None,
        "max_cost": None,
        "cost_det_all_partitions": None,
        "cost_rand_all_partitions": None,
        "norms_remainder_all_partitions": norms_remainder,
        "cost_trotterstage_det_all_partitions": cost_trotterstage_det,
    }
    k = 1 if order == 1 else order // 2
    n_stage = num_trotterstage(order)

    # need to find the optimal values for kappa, number det. terms & delta:
    # 2-step procedure, compute opt. kappa and num_det for fix delta, then optimize
    # over delta in outer minimization layer
    def cost_func(delta: float, return_all=False):
        eps_qpe = np.sqrt(epsilon**2 - c_trotter**2 * delta ** (4 * k))
        cost_det = (
            30
            * n_stage
            * cost_trotterstage_det
            * (0.1 * np.pi) ** 2
            / (eps_qpe * delta * xi)
        )  # xi = 1 corresponds to c_qpe = 0.1*pi, so 0.1*pi/xi adjusts for lower xi
        cost_rand = (
            (280 / 9)
            * (0.1 * np.pi * norms_remainder / eps_qpe) ** 2
            * cost_paulirot_random
        )
        kappa = 1 + np.sqrt(1 + 2 * cost_det / cost_rand)
        cost_det = np.exp(2 / kappa) * cost_det
        cost_rand = kappa * np.exp(2 / kappa) * cost_rand
        cost = cost_det + cost_rand
        cut = np.argmin(cost[1:]) + 1  # don't account for the fully randomized point
        if return_all:
            return kappa, cost_det, cost_rand, cost, cut, eps_qpe
        else:
            return cost[cut]

    buffer = 1e-6  # avoid critical cases in optimization
    opt_res = scipy.optimize.minimize_scalar(
        cost_func,
        bounds=[buffer, np.sqrt(epsilon / c_trotter) - buffer],
        method="bounded",
    )
    delta = opt_res.x
    kappa_all, cost_det_all, cost_rand_all, cost, cut, eps_qpe = cost_func(
        delta, return_all=True
    )
    res["num_terms_det"] = cut
    res["total_cost"] = cost[cut]
    res["kappa_all_partitions"] = kappa_all
    res["delta"] = delta
    res["eps_qpe"] = eps_qpe
    res["cost_det_all_partitions"] = cost_det_all
    res["cost_rand_all_partitions"] = cost_rand_all
    max_cost_rand = (
        kappa_all[cut]
        * (xi * norms_remainder[cut] / eps_qpe) ** 2
        * cost_paulirot_random[cut]
    )
    max_cost_det = (
        n_stage * cost_trotterstage_det[cut] * xi / (2 * eps_qpe * delta)
    )  # divide by 2 because of +- trick
    res["max_cost"] = max_cost_rand + max_cost_det
    return res


def toffoli_per_paulirot_hamming(len_commuting_pauliseq: float, num_ancilla: int):
    """
    Computes the average toffoli cost per paulirotation via Hamming weight phasing.

    :arg len_commuting_pauliseq: The average length of fully commuting subsequences
        of Paulistrings
    :arg num_ancilla: The number J of ancilla qubits to use, related to the phase
    :return: average toffoli cost per paulirotation
    :rtype: float
    """
    assert np.all(num_ancilla > 0)
    return 1 + (num_ancilla - 2) / len_commuting_pauliseq


def cost_paulirot_random_hwphasing(
    epsilon: float, norms_remainder: np.ndarray, xi: float, p_commute: float
):
    """
    Computes the cost per pauli rotation (in Toffolis) for the randomized part,
    based on Hamming-weight phasing.

    :arg epsilon: the targeted accuracy
    :arg norms_remainder: the norm of the randomized parts for a list of partitions
    :arg xi: the depth reduction parameter
    :arg p_commute: probability that a randomly drawn pair of terms commutes
        (prob. dist. weighted by coeffs)
    :return: number of toffoli gates, number of ancillas
    :rtype: np.ndarray, int
    """
    # remove ceil to make graphs smooth
    # J = np.ceil(np.log2(norms_remainder * xi / (np.pi * epsilon)))
    J = np.log2(norms_remainder * xi / (np.pi * epsilon))
    J[J < 1] = 1
    l = commuting_subsequence_length_qdrift(p_commute)
    # return toffoli_per_paulirot_hamming(l, J), 2 * J + np.ceil(l) - 2
    return toffoli_per_paulirot_hamming(l, J), 2 * J + l - 2


def num_toffoli_paulirot():
    """Toffoli cost of one Pauli rotation (equals a single-qubit rotation)."""
    return toffoli_per_1qubitrot()


def toffoli_per_1qubitrot():
    """Toffoli cost of one single-qubit rotation."""
    return 25  # Wan et al. appendix F


def num_toffoli_orbital_rotation(norb1, norb2):
    """
    Orbital rotation of norb1 orbitals, but need to get only norb2 <= norb1
    orbitals correctly.

    :return: Toffoli count of the Givens-rotation network
    """
    givens_rot = norb1 * (norb1 - 1) - (norb1 - norb2) * (norb1 - norb2 - 1)
    return 2 * toffoli_per_1qubitrot() * givens_rot


def num_toffoli_diagonal_hamiltonian(norb):
    """Toffoli count for time evolution under a diagonal two-body Hamiltonian."""
    return toffoli_per_1qubitrot() * norb * (norb - 1)


def paulisum_partition_bitwise(
    hamil,
    order: int,
    epsilon: float,
    p_commute: float,
    c_trotter: float,
    num_bits_hwphasing: int,
    len_comm_seq: int,
    xi: float = 0.1 * np.pi,
):
    """
    Finds the optimal partition of a Paulisum Hamiltonian into a deterministic and
    randomized part for the purpose of phase estimation via partially randomized
    time-evolution. The deterministic terms are expanded bitwise and Hamming-weight
    phasing is applied.

    :arg hamil: the full Hamiltonian as integral tuple (h0, h1e, h2e)
    :arg order: the Trotter order to use for the deterministic part
    :arg epsilon: the accuracy
    :arg p_commute: probability that a randomly drawn pair of terms commutes
        (prob. dist. weighted by coeffs)
    :arg c_trotter: the trotter error prefactor C s.t. error_trot <= C * delta^k
    :arg num_bits_hwphasing: the number of bits to implement deterministically
    :arg len_comm_seq: the length of sequences of commuting terms in deterministic
        part of the Hamiltonian
    :arg xi: depth reduction parameter
    :return: optimization result dict (see optimize_partition) with "num_ancilla" added
    :rtype: dict
    """
    h0 = hamil[0]
    h1e = hamil[1]
    h2e = hamil[2]
    _, coeffs2, coeffs4 = paulisum_hamil_coeffs(h0, h1e, h2e)
    weights = np.sort(np.abs(np.concatenate((coeffs2, coeffs4))))[::-1]
    # bit expansion
    num_ancillae = []
    weights_new = []
    cost_trotterstage = []
    bits_used = []
    max_bit = np.floor(np.log2(weights[0]))
    bits = np.arange(max_bit - num_bits_hwphasing + 1, max_bit + 1)[::-1]
    cost_trotterstage_prev_bit = 0
    num_ancilla_prev_bit = 0
    bits_used_prev = []
    K = len_comm_seq / 2  # only half of the terms have bit = 1
    for bit in bits:
        # contribution of the current bit to every coefficient
        weights_curr = weights - weights % 2**bit
        weights_curr = weights_curr[weights_curr > 0]
        if len(weights_curr) == 0:
            continue
        weights_new += list(weights_curr)
        weights = weights % 2**bit
        # cost to implement this precision
        J = max(-bit, 1)
        num_ancillae += len(weights_curr) * [num_ancilla_prev_bit + 2 * J]
        bits_used += len(weights_curr) * [bits_used_prev + [bit]]
        toffoli_per_paulirot_curr = toffoli_per_paulirot_hamming(K, J)
        cost_trotterstage_curr = (
            cost_trotterstage_prev_bit
            + np.arange(1, len(weights_curr) + 1) * toffoli_per_paulirot_curr
        )
        cost_trotterstage_prev_bit = cost_trotterstage_curr[-1]
        num_ancilla_prev_bit = num_ancillae[-1]
        bits_used_prev = bits_used[-1]
        cost_trotterstage += list(cost_trotterstage_curr)
    # whatever is left below the last bit stays in the randomized part
    remainder = weights
    onenorm_rem = np.sum(remainder)
    weights = weights_new
    norms_remainder = np.sum(weights) - np.cumsum(np.sort(weights)[::-1]) + onenorm_rem
    norms_remainder[norms_remainder <= 0] = 1e-15  # avoid issue with logarithm below
    cost_paulirot_rand, num_anc_rand = cost_paulirot_random_hwphasing(
        epsilon, norms_remainder, xi, p_commute
    )
    cost_trotterstage = np.array(cost_trotterstage)
    res = optimize_partition(
        epsilon=epsilon,
        c_trotter=c_trotter,
        order=order,
        norms_remainder=norms_remainder,
        cost_trotterstage_det=cost_trotterstage,
        cost_paulirot_random=cost_paulirot_rand,
        xi=xi,
    )
    num_ancillae = num_ancillae[res["num_terms_det"]]
    num_ancillae += K - 2
    num_anc_rand = num_anc_rand[res["num_terms_det"]]
    bits_used = bits_used[res["num_terms_det"]]
    # NOTE(review): compares an ancilla count against a list of bit positions —
    # presumably checks whether the random part can reuse det. ancillas; confirm.
    if num_anc_rand not in bits_used:
        num_ancillae += num_anc_rand
    res["num_ancilla"] = num_ancillae
    return res


def doublefact_partition(
    hamil,
    order: int,
    epsilon: float,
    c_trotter: float,
    p_commute: float,
    xi: float = 0.1 * np.pi,
    thresh_2ndfact: float = 0.0,
    verbose: bool = False,
    num_cvec_max: int = None,
):
    """
    Partition the Hamiltonian via a (truncated) double factorization: the leading
    Cholesky vectors of the two-electron integrals form the deterministic part,
    the remainder is randomized.

    :arg hamil: the full Hamiltonian as integral tuple (h0, h1e, h2e)
    :arg order: the Trotter order to use for the deterministic part
    :arg epsilon: the accuracy
    :arg c_trotter: the trotter error prefactor C s.t. error_trot <= C * delta^k
    :arg p_commute: probability that a randomly drawn pair of terms commutes
    :arg xi: the depth reduction parameter
    :arg thresh_2ndfact: truncation threshold for the second factorization
    :arg verbose: print remainder norms while building the partitions
    :arg num_cvec_max: maximal number of Cholesky vectors to consider
    :return: optimization result dict (see optimize_partition) with "num_ancilla" added
    :rtype: dict
    """
    # sort and truncate the number of cholesky vectors, define initial remainder
    h0 = hamil[0]
    h1e = hamil[1]
    h2e = hamil[2]
    h1e_fact = h1e_factorized(h1e, h2e)
    cvecs = h2e_cholesky(h2e)
    full_onenorm = norm1_paulisum(hamil)
    if verbose:
        print("full onenorm: ", full_onenorm)
    # norms_remainder = [full_onenorm]
    # cost_trotterstage_det = [0]
    norms_remainder = []
    cost_trotterstage_det = []
    if num_cvec_max is None:
        num_cvec_max = cvecs.shape[0]
    opnorms_cvecs = np.array([opnorm_cholesky_vec(cvec) for cvec in cvecs])
    cvecs = cvecs[np.argsort(opnorms_cvecs)][::-1]
    truncated_cvecs = np.zeros_like(cvecs)
    h1e_zero = np.zeros_like(h1e_fact)
    norb = h1e_zero.shape[0]
    hamil_remainder = (0, h1e_zero, h2e)
    h1e_cancel = h1e_1norm_cancellation(hamil_remainder)
    hamil_remainder = (0, -h1e_cancel, h2e)
    norms_remainder.append(norm1_paulisum(hamil_remainder))
    if verbose:
        print("onenorm without 1-el term", norm1_paulisum(hamil_remainder))
    cost_1el_phase_gates = (
        norb  # normally 2*norb, here it's at start and end of trotterstep
    )
    cost_trotterstage_det.append(
        cost_1el_phase_gates + num_toffoli_orbital_rotation(norb, norb)
    )
    for i, cvec in enumerate(cvecs[:num_cvec_max]):
        eigvals2, o = np.linalg.eigh(cvec)
        sumeigvals = np.sum(np.abs(eigvals2))
        # truncate eigenvalues of 2nd factorization
        # copy to avoid mutating the eigh output in place
        eigvals2_trunc = eigvals2.copy()
        trunc_idx = np.where(np.abs(eigvals2) * sumeigvals < thresh_2ndfact)[0]
        eigvals2_trunc[trunc_idx] = 0
        num_orb_trunc = norb - len(trunc_idx)
        if num_orb_trunc == 0:
            break
        truncated_cvecs[i] = o @ (np.eye(norb) * eigvals2_trunc) @ o.T
        # compute remainder norm and cost of current deterministic term
        hamil_det = factorized_to_standard_integrals(h1e_fact, truncated_cvecs)
        hamil_remainder = (0, h1e - hamil_det[0], h2e - hamil_det[1])
        h1e_cancel = h1e_1norm_cancellation(hamil_remainder)
        hamil_remainder = (0, hamil_remainder[1] - h1e_cancel, hamil_remainder[2])
        if verbose:
            print(f"cvec {i}, norm remainder:", norm1_paulisum(hamil_remainder))
        norms_remainder.append(norm1_paulisum(hamil_remainder))
        cost_i = (
            num_toffoli_diagonal_hamiltonian(num_orb_trunc)
            + num_toffoli_orbital_rotation(norb, num_orb_trunc)
            + cost_trotterstage_det[-1]
        )
        cost_trotterstage_det.append(cost_i)
    cost_paulirot_random, num_ancilla = cost_paulirot_random_hwphasing(
        epsilon, np.array(norms_remainder), xi, p_commute
    )
    res = optimize_partition(
        epsilon=epsilon,
        c_trotter=c_trotter,
        order=order,
        cost_trotterstage_det=np.array(cost_trotterstage_det),
        norms_remainder=np.array(norms_remainder),
        cost_paulirot_random=cost_paulirot_random,
        xi=xi,
    )
    res["num_ancilla"] = num_ancilla[res["num_terms_det"]]
    return res


def commuting_subsequence_length_qdrift(prob_commute: float):
    """
    Estimates the average length of fully commuting subsequences of Paulistrings in
    qDrift. More concretely, if the probability that a randomly drawn pair of
    Paulistrings commutes is p, this function computes
    Σ_n=1^∞ n · p^(n choose 2) · (1 - p^n)

    :arg prob_commute: the probability that a randomly drawn pair of Paulistrings
        commutes
    :return: the average length of fully commuting subsequences
    :rtype: float
    """
    n_avg = 0
    n = 1
    while True:
        n_curr = n * prob_commute ** (binom(n, 2)) * (1 - prob_commute**n)
        n_avg += n_curr
        if n_curr < 1e-9:
            break
        n += 1
    return n_avg


def paulisum_partition(
    hamil,
    order: int,
    epsilon: float,
    c_trotter: float,
    xi=0.1 * np.pi,
    costmetric="toffoli",
    gate2_avg_per_paulirot=None,
    gate2_weighted_per_paulirot=None,
    p_commute=None,
):
    """
    Finds the optimal partition of a Paulisum Hamiltonian into a deterministic and
    randomized part for the purpose of phase estimation via partially randomized
    time-evolution.

    NOTE: the file previously contained a second, shadowed definition of this
    function (taking a FCIDUMP path); this is the effective one, expecting the
    integral tuple directly.

    :arg hamil: the full Hamiltonian as integral tuple (h0, h1e, h2e)
    :arg order: the Trotter order to use for the deterministic part
    :arg epsilon: the accuracy
    :arg c_trotter: the trotter error prefactor C s.t. error_trot <= C * delta^k
    :arg xi: the depth reduction parameter
    :arg costmetric: options are "toffoli", "2-qubit" or "paulirot"
    :arg gate2_avg_per_paulirot: average number of 2-qubit gates required to
        implement a pauli rotation
    :arg gate2_weighted_per_paulirot: number of 2-qubit gates required to implement
        a pauli rotation weighted by the coefficient of that term
    :arg p_commute: probability that a randomly drawn pair of terms commutes
        (prob. dist. weighted by coeffs)
    :return: dictionary with parameters of optimal partition
    :rtype: dict
    """
    h0 = hamil[0]
    h1e = hamil[1]
    h2e = hamil[2]
    _, coeffs2, coeffs4 = paulisum_hamil_coeffs(h0, h1e, h2e)
    weights = np.sort(np.abs(np.concatenate((coeffs2, coeffs4))))[::-1]
    norms_remainder = np.sum(weights) - np.cumsum(np.sort(weights)[::-1])
    norms_remainder[norms_remainder <= 0] = 1e-15  # avoid issue with logarithm below
    num_terms = len(weights)
    cost_paulirot_random = None
    cost_trotterstage_det = np.arange(num_terms)
    num_ancilla = 0
    if costmetric == "toffoli":
        assert p_commute is not None
        cost_paulirot_random, num_ancilla = cost_paulirot_random_hwphasing(
            epsilon, norms_remainder, xi, p_commute
        )
        cost_trotterstage_det *= num_toffoli_paulirot()
    elif costmetric == "2-qubit":
        assert gate2_weighted_per_paulirot is not None
        assert gate2_avg_per_paulirot is not None
        cost_paulirot_random = gate2_weighted_per_paulirot
        cost_trotterstage_det = cost_trotterstage_det * gate2_avg_per_paulirot
    elif costmetric == "paulirot":
        cost_paulirot_random = 1
    res = optimize_partition(
        epsilon=epsilon,
        c_trotter=c_trotter,
        order=order,
        norms_remainder=norms_remainder,
        cost_trotterstage_det=cost_trotterstage_det,
        cost_paulirot_random=cost_paulirot_random,
        xi=xi,
    )
    if costmetric == "toffoli":
        num_ancilla = num_ancilla[res["num_terms_det"]]
    res["num_ancilla"] = num_ancilla
    return res


def compute_threshold(
    cost_paulirot_rand: float,
    onenorm: float,
    epsilon: float,
    c_trotter: float,
    xi: float,
    verbose: bool = False,
):
    """
    Compute the threshold for the Toffoli cost of the deterministic part of a
    trotterstage, i.e. the threshold below which the partial random method results
    in a lower cost than pure qdrift. Assumes 2nd order Trotter. The threshold is
    computed as a function of the remainder one-norm.

    :arg cost_paulirot_rand: The expected Toffoli cost of a pauli-term sampled from
        the randomized part
    :arg onenorm: the full onenorm of the Hamiltonian
    :arg epsilon: targeted precision
    :arg c_trotter: the 2nd order Trotter prefactor, i.e. C such that
        error_trotter = C * trotterstepsize^2
    :arg xi: the depth reduction parameter
    :arg verbose: print optimization progress
    :return: remainder onenorms corresponding to a sequence of partitions, and the
        threshold trotterstage cost for each (np.nan where no crossing was found)
    :rtype: (np.ndarray, list)
    """
    if verbose:
        print("\n### Computing threshold ###")
    c_qpe_qdrift = 8
    cost_qdrift = cost_paulirot_rand * c_qpe_qdrift * (onenorm / epsilon) ** 2
    # scan remainder norms; denser sampling close to the full one-norm
    norms_remainder_scan = np.concatenate(
        (
            np.linspace(start=1e-3, stop=onenorm, num=50)[:-2],
            np.linspace(start=onenorm * 48 / 50, stop=onenorm, num=20)[:-1],
        )
    )
    kappa = lambda cost_det, cost_rand: 1 + np.sqrt(1 + 2 * cost_det / cost_rand)
    eps_qpe = lambda delta: np.sqrt(epsilon**2 - c_trotter**2 * delta**4)

    def cost(norm_remainder, delta, cost_trotterstage_det):
        cost_det = (
            30
            * 2
            * cost_trotterstage_det
            * (0.1 * np.pi) ** 2
            / (eps_qpe(delta) * delta * xi)
        )
        cost_rand = (
            (280 / 9)
            * (0.1 * np.pi * norm_remainder / eps_qpe(delta)) ** 2
            * cost_paulirot_rand
        )
        kap = kappa(cost_det, cost_rand)
        return np.exp(2 / kap) * cost_det + kap * np.exp(2 / kap) * cost_rand

    cost_trotterstage_det_thresh = []
    for norm_rem in norms_remainder_scan:
        if verbose:
            print(f"\n{norm_rem=}")
        deltas = np.linspace(
            start=1e-8, stop=np.sqrt(epsilon / c_trotter) - 1e-8, num=10000
        )

        def cost_delta_optimization(cost_trotterstage_det: float, return_delta=False):
            delta_opt = deltas[np.argmin(cost(norm_rem, deltas, cost_trotterstage_det))]
            diff = np.abs(
                cost(norm_rem, delta_opt, cost_trotterstage_det) - cost_qdrift
            )
            if return_delta:
                return diff, delta_opt
            else:
                return diff

        opt_res = scipy.optimize.minimize_scalar(
            cost_delta_optimization,
            bounds=[1e-3, cost_qdrift],
            method="bounded",
            tol=1e-9,
        )
        # minimize_scalar returns a scalar x; the previous opt_res.x[0] raised at runtime
        cost_trotterstage_det_curr = opt_res.x
        rel_diff = opt_res.fun / cost_qdrift
        if verbose:
            print("optimization success: ", opt_res.success)
            print("optimized cost trotterstage det: ", cost_trotterstage_det_curr)
            print("remaining relative difference to qdrift cost: ", rel_diff)
        # only accept the threshold if the optimized cost really matches qdrift
        if rel_diff < 1e-2:
            cost_trotterstage_det_thresh.append(cost_trotterstage_det_curr)
        else:
            cost_trotterstage_det_thresh.append(np.nan)
    return norms_remainder_scan, cost_trotterstage_det_thresh


def analyse_partrand(
    hamil,
    c_trotter: float,
    epsilon: float,
    methods: list[str],
    order: int = 2,
    xi=0.1 * np.pi,
    costmetric="toffoli",
    gate2_avg_per_paulirot=None,
    gate2_weighted_per_paulirot=None,
    num_bits_hwphasing_det: int = None,
    len_comm_seq_det: int = None,
    p_commute=None,
    thresh_2ndfact: float = None,
    num_cvec_max: int = None,
    verbose: bool = False,
):
    """
    Run the qDrift baseline and the requested partial-randomization methods on one
    Hamiltonian and print a cost summary for each.

    :arg hamil: the full Hamiltonian as integral tuple (h0, h1e, h2e)
    :arg methods: subset of ["paulisum", "paulisum_bitwise", "doublefact"]
    :return: per-method result dicts, plus the "qDrift" baseline entry
    :rtype: dict
    """
    if costmetric != "toffoli":
        raise ValueError("only 'toffoli' costmetric works (I think)")
    # qdrift cost
    c_qpe_qdrift = 8  # appendix B
    onenorm = norm1_paulisum(hamil)
    if costmetric == "toffoli":
        cost_paulirot_random, num_ancilla_qdrift = cost_paulirot_random_hwphasing(
            epsilon=epsilon,
            norms_remainder=np.array([onenorm]),
            xi=xi,
            p_commute=p_commute,
        )
    elif costmetric == "2-qubit":
        cost_paulirot_random = gate2_weighted_per_paulirot
        num_ancilla_qdrift = 0
    else:
        raise ValueError("costmetric ", costmetric, " not implemented")
    cost_qdrift = cost_paulirot_random[0] * c_qpe_qdrift * (onenorm / epsilon) ** 2
    max_cost_qdrift = cost_paulirot_random[0] * (xi * onenorm / epsilon) ** 2
    print("\n### qDrift ###")
    print(f"cost_qdrift: {scientific_superscript(cost_qdrift, 2)}")
    print(f"Maximal number of Toffoli: {scientific_superscript(max_cost_qdrift,2)}")
    print(f"num_ancilla =", num_ancilla_qdrift[0])
    res_methods = {method: None for method in methods}
    res_methods["qDrift"] = {
        "num_ancilla": num_ancilla_qdrift,
        "total_cost": cost_qdrift,
        "max_cost": max_cost_qdrift,
    }
    for method in methods:
        if method == "paulisum":
            res = paulisum_partition(
                hamil=hamil,
                c_trotter=c_trotter,
                epsilon=epsilon,
                order=order,
                xi=xi,
                costmetric=costmetric,
                gate2_avg_per_paulirot=gate2_avg_per_paulirot,
                gate2_weighted_per_paulirot=gate2_weighted_per_paulirot,
                p_commute=p_commute,
            )
        elif method == "paulisum_bitwise":
            assert costmetric == "toffoli"
            res = paulisum_partition_bitwise(
                hamil=hamil,
                c_trotter=c_trotter,
                epsilon=epsilon,
                order=order,
                xi=xi,
                p_commute=p_commute,
                num_bits_hwphasing=num_bits_hwphasing_det,
                len_comm_seq=len_comm_seq_det,
            )
        elif method == "doublefact":
            res = doublefact_partition(
                hamil=hamil,
                order=order,
                epsilon=epsilon,
                c_trotter=c_trotter,
                p_commute=p_commute,
                xi=xi,
                thresh_2ndfact=thresh_2ndfact,
                verbose=verbose,
                num_cvec_max=num_cvec_max,
            )
        res_methods[method] = res
        cost_det = res["cost_det_all_partitions"]
        num_terms = int(res["num_terms_det"])
        delta = float(res["delta"])
        num_ancilla = int(res["num_ancilla"])
        norms_remainder = res["norms_remainder_all_partitions"]
        cost_det_opt_partition = res["cost_det_all_partitions"][num_terms]
        cost_rand_opt_partition = res["cost_rand_all_partitions"][num_terms]
        norm_remainder_opt_partition = norms_remainder[num_terms]
        print("\n### " + method + " ###")
        print(
            f"cost part rand {method}: {scientific_superscript(res['total_cost'], 2)}"
        )
        print(
            f" --> of which fraction det: {scientific_superscript(cost_det_opt_partition/res['total_cost'], 2)}"
        )
        print(
            f" --> of which fraction rand: {scientific_superscript(cost_rand_opt_partition/res['total_cost'], 2)}"
        )
        print(
            f"Maximal number of Toffoli partially random: {scientific_superscript(res['max_cost'],2)}"
        )
        print(f"{num_ancilla=}")
        print(f"{delta=}")
        print("fraction norm remainder:", norm_remainder_opt_partition / onenorm)
        print(f"num terms det:", num_terms)
        if method != "doublefact":
            print(f" --> fraction of total num terms:", num_terms / len(cost_det))
    # compute_threshold expects a scalar per-rotation cost; pass the single entry
    norms_remainder_thresh, cost_trotterstage_det_thresh = compute_threshold(
        cost_paulirot_rand=cost_paulirot_random[0],
        onenorm=onenorm,
        epsilon=epsilon,
        c_trotter=c_trotter,
        xi=xi,
        verbose=verbose,
    )
    return res_methods


def analysis(xis=(0.1 * np.pi,), thresh_2ndfact=0.01, num_cvec_max=100, save_csv=None):
    """
    Scan CAS Hamiltonians of increasing size, compare qDrift against the bitwise
    partially randomized method, and optionally write the results to CSV.

    :arg xis: depth-reduction parameters to scan (immutable default on purpose)
    :arg thresh_2ndfact: truncation threshold for the double factorization
    :arg num_cvec_max: maximal number of Cholesky vectors for doublefact
    :arg save_csv: path of the CSV output file; skip writing if None
    """
    epsilon = 1.5e-3  # reserve 1e-4 for synthesis error
    order = 2
    c_trotter = lambda hamil: (1.08e-4 * norm1_paulisum(hamil) ** 1.248)
    num_bits_hwphasing = 22
    p_commute = 0.95
    num_orbs = np.arange(10, 80, step=5, dtype=int)
    cost_qdrift_tot = []
    cost_qdrift_max = []
    num_ancilla_qdrift = []
    cost_partrand_tot = []
    cost_partrand_max = []
    num_ancilla_partrand = []
    for xi in xis:
        for num_orb in num_orbs:
            hamil = fcidump_to_integrals(f"fcidump/CAS{num_orb}_min_FCIDUMP")
            len_comm_seq_det = num_orb // 2
            print(f"\n### num orb = {num_orb} ### ")
            print(
                "avg commuting subsequence length random:",
                commuting_subsequence_length_qdrift(p_commute),
            )
            print("avg commuting subsequence length det:", len_comm_seq_det)
            res = analyse_partrand(
                hamil=hamil,
                epsilon=epsilon,
                c_trotter=c_trotter(hamil),
                order=order,
                methods=["paulisum", "paulisum_bitwise", "doublefact"],
                num_bits_hwphasing_det=num_bits_hwphasing,
                len_comm_seq_det=len_comm_seq_det,
                p_commute=p_commute,
                xi=xi,
                thresh_2ndfact=thresh_2ndfact,
                verbose=False,
                num_cvec_max=num_cvec_max,
            )
            cost_qdrift_tot.append(res["qDrift"]["total_cost"])
            cost_qdrift_max.append(res["qDrift"]["max_cost"])
            num_ancilla_qdrift.append(res["qDrift"]["num_ancilla"][0])
            cost_partrand_tot.append(res["paulisum_bitwise"]["total_cost"])
            cost_partrand_max.append(res["paulisum_bitwise"]["max_cost"])
            num_ancilla_partrand.append(res["paulisum_bitwise"]["num_ancilla"])
    res = {
        "num_orbs": np.tile(num_orbs, len(xis)),
        "cost_qdrift_tot": cost_qdrift_tot,
        "cost_qdrift_max": cost_qdrift_max,
        "num_ancilla_qdrift": num_ancilla_qdrift,
        "cost_partrand_tot": cost_partrand_tot,
        "cost_partrand_max": cost_partrand_max,
        "num_ancilla_partrand": num_ancilla_partrand,
        "xi": np.repeat(xis, len(num_orbs)),
    }
    if save_csv is not None:
        df = pandas.DataFrame(res)
        df.to_csv(save_csv, index=False)


def main():
    analysis(xis=[0.05, 0.1, 0.1 * np.pi], save_csv="partrand_qdrift_cost.csv")
    plt.show()


if __name__ == "__main__":
    main()