Skip to content

Probability Tables

oqd_heisenberg_ion.simulators.qmc.long_range.preprocess

probability_table

base

ProbabilityTable

ProbabilityTable base class to generate probability table files

Source code in src/oqd_heisenberg_ion/simulators/qmc/long_range/preprocess/probability_table/base.py
class ProbabilityTable:
    """ProbabilityTable base class to generate probability table files"""

    # subclass-specific sampling arguments (name -> type); empty for the base class
    args = {}

    def __init__(self, system, **sampling_args):
        """
        constructor builds the geometry and sets the base member variables

        Args:
            system (System): object representing the system to be simulated
            **sampling_args (dict): key word arguments specifying the sampling parameters
        """

        self.system = system
        # construct the lattice/bond structure before any tables are computed
        self.system.geometry.build()

        self.sampling_parameters = sampling_args

        # summary quantities populated by subclasses in build()
        self.spectrum_offset = None
        self.max_diag_norm = None
        self.max_over_states = None

    def validate_system(self):
        """
        validates the system associated with the ProbabilityTable object

        Raises:
            Exception: if geometry is not long range, since probability tables are only required for long range geometries
            Exception: if the energy scale J is not positive (J <= 0), since that can result in negative probabilities
        """

        if self.system.geometry.interaction_range != "long_range":
            raise Exception("Probability tables are constructed for long range systems\n")

        # J <= 0 would yield zero or negative weights, i.e. invalid probabilities
        if self.system.hamiltonian_parameters.J <= 0:
            raise Exception("J sets the energy scale. It must be positive for long-range QMC")

    def build(self):
        """
        each ProbabilityTable subclass must implement a build method to populate tables
        """
        pass

    def write_to_files(self, out_dir):
        """
        makes the directories required for writing probability tables. Writing logic owned by subclasses

        Args:
            out_dir (str): directory path for probability tables

        Raises:
            OSError: if the probability density directory already exists (makedirs is called without exist_ok)
        """

        self.out_dir = out_dir
        # trailing slash kept intentionally; downstream joins rely on the directory path
        self.prob_dir = os.path.join(self.out_dir, "probability_densities/")
        os.makedirs(self.prob_dir)
__init__

constructor builds the geometry and sets the base member variables

Parameters:

  • system (System) –

    object representing the system to be simulated

  • **sampling_args (dict, default: {} ) –

    key word arguments specifying the sampling parameters

Source code in src/oqd_heisenberg_ion/simulators/qmc/long_range/preprocess/probability_table/base.py
def __init__(self, system, **sampling_args):
    """
    Store the system, build its geometry, and initialize the base attributes.

    Args:
        system (System): object representing the system to be simulated
        **sampling_args (dict): keyword arguments specifying the sampling parameters
    """

    self.system = system
    # geometry must exist before any table construction
    system.geometry.build()

    self.sampling_parameters = sampling_args

    # table summary quantities; subclasses assign real values during build()
    for attr in ("spectrum_offset", "max_diag_norm", "max_over_states"):
        setattr(self, attr, None)
validate_system

validates the system associated with the ProbabilityTable object

Raises:

  • Exception

    if geometry is not long range, since probability tables are only required for long range geometries

  • Exception

    if the energy scale J is less than or equal to zero, since that can result in non-positive probabilities

Source code in src/oqd_heisenberg_ion/simulators/qmc/long_range/preprocess/probability_table/base.py
def validate_system(self):
    """
    validates the system associated with the ProbabilityTable object

    Raises:
        Exception: if geometry is not long range, since probability tables are only required for long range geometries
        Exception: if the energy scale J is not positive (J <= 0), since that can result in negative probabilities
    """

    # probability tables only exist for long-range interactions
    if self.system.geometry.interaction_range != "long_range":
        raise Exception("Probability tables are constructed for long range systems\n")

    # J <= 0 would yield zero or negative weights, i.e. invalid probabilities
    if self.system.hamiltonian_parameters.J <= 0:
        raise Exception("J sets the energy scale. It must be positive for long-range QMC")
build

each ProbabilityTable subclass must implement a build method to populate tables

Source code in src/oqd_heisenberg_ion/simulators/qmc/long_range/preprocess/probability_table/base.py
def build(self):
    """
    Hook for subclasses: populate the probability tables.

    The base implementation intentionally does nothing.
    """
    return None
write_to_files

makes the directories required for writing probability tables. Writing logic owned by subclasses

Parameters:

  • out_dir (str) –

    directory path for probability tables

Source code in src/oqd_heisenberg_ion/simulators/qmc/long_range/preprocess/probability_table/base.py
def write_to_files(self, out_dir):
    """
    makes the directories required for writing probability tables. Writing logic owned by subclasses

    Args:
        out_dir (str): directory path for probability tables

    Raises:
        OSError: if the probability density directory already exists, since makedirs is called without exist_ok
    """

    self.out_dir = out_dir
    # trailing slash kept intentionally; NOTE(review): confirm downstream path handling expects it
    self.prob_dir = os.path.join(self.out_dir, "probability_densities/")
    os.makedirs(self.prob_dir)

deterministic

Deterministic

Bases: ProbabilityTable

ProbabilityTable subclass for deterministic sampling

Raises:

  • Exception

    raised if deterministic probability tables are requested for an unsupported hamiltonian type

  • ValueError

    raised if the spectrum offset is requested but the hamiltonian type is not consistent with sampling type

Source code in src/oqd_heisenberg_ion/simulators/qmc/long_range/preprocess/probability_table/deterministic.py
class Deterministic(ProbabilityTable):
    """
    ProbabilityTable subclass for deterministic sampling

    Raises:
        Exception: raised if deterministic probability tables are requested for an unsupported hamiltonian type
        ValueError: raised if the spectrum offset is requested but the hamiltonian type is not consistent with sampling type
    """

    # deterministic sampling takes no extra sampling arguments
    args = {}
    allowed_hamiltonians = {"XY", "fm_heisenberg_afm_Z", "fm_heisenberg_fm_Z"}

    def __init__(self, system):
        """
        validates inputs and constructs the probability tables

        Args:
            system (System): object representing the system to be simulated
        """

        super().__init__(system)

        # fail fast before building any tables
        self.validate_system()

        self.build()

    def validate_system(self):
        """
        validates the system associated with the ProbabilityTable instance

        Raises:
            Exception: if hamiltonian name does not match the allowed hamiltonians for deterministic sampling
        """

        # base-class checks: long-range geometry and positive J
        super().validate_system()

        hamiltonian_name = self.system.hamiltonian_parameters.hamiltonian_name

        if hamiltonian_name not in self.allowed_hamiltonians:
            raise Exception(
                "Inconsistent hamiltonian and sampling types. Deterministic probability tables "
                "only support the following types: {}".format(self.allowed_hamiltonians)
            )

    def build(self):
        """
        populates the probability tables for deterministic sampling

        The max-over-states table is computed first because compute_spectrum_offset
        reads the resulting self.max_diag_norm.
        """

        self.compute_max_over_states(self.system.geometry.num_bonds, self.system.interactions.J_ij_vector)
        self.compute_spectrum_offset(self.system.hamiltonian_parameters.hamiltonian_name)

    def compute_spectrum_offset(self, hamiltonian_name):
        """
        computes the spectrum offset associated with the hamiltonian for SSE

        Args:
            hamiltonian_name (str): represents the name of the Hamiltonian

        Raises:
            ValueError: if the spectrum is requested and the Hamiltonian can not be used with deterministic sampling
        """

        if hamiltonian_name == "XY":
            self.spectrum_offset = self.max_diag_norm
        elif hamiltonian_name == "fm_heisenberg_afm_Z" or hamiltonian_name == "fm_heisenberg_fm_Z":
            self.spectrum_offset = 0.5 * self.max_diag_norm
        else:
            raise ValueError(
                "Invalid Hamiltonian type: {} provided for deterministic probability tables. "
                "Allowed types are {}".format(hamiltonian_name, self.allowed_hamiltonians)
            )

    def compute_max_over_states(self, num_bonds, J_ij_vector):
        """
        computes the max norm probability table required for diagonal updates in the two-step method.
        See: https://scipost.org/submissions/2107.00766v1/ and https://journals.aps.org/pre/abstract/10.1103/PhysRevE.66.046701 for details

        Args:
            num_bonds (int): number of interacting bonds in the lattice
            J_ij_vector (numpy.ndarray[float]): num_bonds x 1 array containing the coupling strengths
        """

        # each bond contributes J_ij / 2; vectorized over bonds instead of a Python loop
        max_over_states = 0.5 * np.asarray(J_ij_vector[:num_bonds], dtype=float)
        max_diag_norm = float(np.sum(max_over_states))

        # normalize so the per-bond entries form a probability table
        self.max_over_states = max_over_states / max_diag_norm
        self.max_diag_norm = max_diag_norm

    def write_to_files(self, out_dir):
        """
        writes the probability tables to files for deterministic sampling

        Args:
            out_dir (str): file path for writing probability tables
        """

        # base class creates out_dir/probability_densities/ and sets self.prob_dir
        super().write_to_files(out_dir)

        geometry_file_name = os.path.join(self.prob_dir, "geometry.csv")
        max_over_states_file_name = os.path.join(self.prob_dir, "max_over_states.csv")

        geometry_table = self.system.geometry.geometry_table
        num_bonds = self.system.geometry.num_bonds

        # geometry is integer data; the bond count travels in the csv header
        np.savetxt(geometry_file_name, geometry_table, delimiter=",", fmt="%d", header="NumBonds={}".format(num_bonds))

        # the SSE engine reads the normalization and offset from the csv header
        header = "norm={},spectrum_offset={},loop_update_type={}".format(
            self.max_diag_norm, self.spectrum_offset, "deterministic"
        )

        np.savetxt(max_over_states_file_name, self.max_over_states, delimiter=",", header=header)
__init__

validates inputs and constructs the probability tables

Parameters:

  • system (System) –

    object representing the system to be simulated

Source code in src/oqd_heisenberg_ion/simulators/qmc/long_range/preprocess/probability_table/deterministic.py
def __init__(self, system):
    """
    validates inputs and constructs the probability tables

    Args:
        system (System): object representing the system to be simulated

    Raises:
        Exception: propagated from validate_system if the system or hamiltonian is unsupported
    """

    super().__init__(system)

    # fail fast before building any tables
    self.validate_system()

    self.build()
validate_system

validates the system associated with the ProbabilityTable instance

Raises:

  • Exception

    if hamiltonian name does not match the allowed hamiltonians for deterministic sampling

Source code in src/oqd_heisenberg_ion/simulators/qmc/long_range/preprocess/probability_table/deterministic.py
def validate_system(self):
    """
    validates the system associated with the ProbabilityTable instance

    Raises:
        Exception: if hamiltonian name does not match the allowed hamiltonians for deterministic sampling
    """

    # base-class checks: long-range geometry and positive J
    super().validate_system()

    hamiltonian_name = self.system.hamiltonian_parameters.hamiltonian_name

    # deterministic tables only exist for the hamiltonians listed in allowed_hamiltonians
    if hamiltonian_name not in self.allowed_hamiltonians:
        raise Exception(
            "Inconsistent hamiltonian and sampling types. Deterministic probability tables "
            "only support the following types: {}".format(self.allowed_hamiltonians)
        )
build

populates the probability tables for deterministic sampling

Source code in src/oqd_heisenberg_ion/simulators/qmc/long_range/preprocess/probability_table/deterministic.py
def build(self):
    """
    populates the probability tables for deterministic sampling

    The max-over-states table is computed first because compute_spectrum_offset
    reads the resulting self.max_diag_norm.
    """

    self.compute_max_over_states(self.system.geometry.num_bonds, self.system.interactions.J_ij_vector)
    self.compute_spectrum_offset(self.system.hamiltonian_parameters.hamiltonian_name)
compute_spectrum_offset

computes the spectrum offset associated with the hamiltonian for SSE

Parameters:

  • hamiltonian_name (str) –

    represents the name of the Hamiltonian

Raises:

  • ValueError

    if the spectrum is requested and the Hamiltonian can not be used with deterministic sampling

Source code in src/oqd_heisenberg_ion/simulators/qmc/long_range/preprocess/probability_table/deterministic.py
def compute_spectrum_offset(self, hamiltonian_name):
    """
    computes the spectrum offset associated with the hamiltonian for SSE

    Args:
        hamiltonian_name (str): represents the name of the Hamiltonian

    Raises:
        ValueError: if the spectrum is requested and the Hamiltonian can not be used with deterministic sampling
    """

    # fraction of max_diag_norm that each supported hamiltonian contributes to the offset
    offset_factors = {
        "XY": 1.0,
        "fm_heisenberg_afm_Z": 0.5,
        "fm_heisenberg_fm_Z": 0.5,
    }

    if hamiltonian_name not in offset_factors:
        raise ValueError(
            "Invalid Hamiltonian type: {} provided for deterministic probability tables. "
            "Allowed types are {}".format(hamiltonian_name, self.allowed_hamiltonians)
        )

    self.spectrum_offset = offset_factors[hamiltonian_name] * self.max_diag_norm
compute_max_over_states

computes the max norm probability table required for diagonal updates in the two-step method. See: https://scipost.org/submissions/2107.00766v1/ and https://journals.aps.org/pre/abstract/10.1103/PhysRevE.66.046701 for details

Parameters:

  • num_bonds (int) –

    number of interacting bonds in the lattice

  • J_ij_vector (ndarray[float]) –

    num_bonds x 1 array containing the coupling strengths

Source code in src/oqd_heisenberg_ion/simulators/qmc/long_range/preprocess/probability_table/deterministic.py
def compute_max_over_states(self, num_bonds, J_ij_vector):
    """
    computes the max norm probability table required for diagonal updates in the two-step method.
    See: https://scipost.org/submissions/2107.00766v1/ and https://journals.aps.org/pre/abstract/10.1103/PhysRevE.66.046701 for details

    Args:
        num_bonds (int): number of interacting bonds in the lattice
        J_ij_vector (numpy.ndarray[float]): num_bonds x 1 array containing the coupling strengths
    """

    # each bond contributes J_ij / 2; vectorized over bonds instead of a Python loop
    max_over_states = 0.5 * np.asarray(J_ij_vector[:num_bonds], dtype=float)
    max_diag_norm = float(np.sum(max_over_states))

    # normalize so the per-bond entries form a probability table
    self.max_over_states = max_over_states / max_diag_norm
    self.max_diag_norm = max_diag_norm
write_to_files

writes the probability tables to files for deterministic sampling

Parameters:

  • out_dir (str) –

    file path for writing probability tables

Source code in src/oqd_heisenberg_ion/simulators/qmc/long_range/preprocess/probability_table/deterministic.py
def write_to_files(self, out_dir):
    """
    writes the probability tables to files for deterministic sampling

    Args:
        out_dir (str): file path for writing probability tables
    """

    # base class creates out_dir/probability_densities/ and sets self.prob_dir
    super().write_to_files(out_dir)

    geometry_file_name = os.path.join(self.prob_dir, "geometry.csv")
    max_over_states_file_name = os.path.join(self.prob_dir, "max_over_states.csv")

    geometry_table = self.system.geometry.geometry_table
    num_bonds = self.system.geometry.num_bonds

    # geometry is integer data; the bond count travels in the csv header
    np.savetxt(geometry_file_name, geometry_table, delimiter=",", fmt="%d", header="NumBonds={}".format(num_bonds))

    # the SSE engine reads the normalization and offset from the csv header
    header = "norm={},spectrum_offset={},loop_update_type={}".format(
        self.max_diag_norm, self.spectrum_offset, "deterministic"
    )

    np.savetxt(max_over_states_file_name, self.max_over_states, delimiter=",", header=header)

directed_loops

DirectedLoops

Bases: ProbabilityTable

ProbabilityTable subclass for directed loops sampling See: https://journals.aps.org/pre/abstract/10.1103/PhysRevE.66.046701 for details

Source code in src/oqd_heisenberg_ion/simulators/qmc/long_range/preprocess/probability_table/directed_loops.py
class DirectedLoops(ProbabilityTable):
    """
    ProbabilityTable subclass for directed loops sampling
    See: https://journals.aps.org/pre/abstract/10.1103/PhysRevE.66.046701 for details
    """

    args = {"gamma": float, "ksi": float, "distance_dependent_offset": bool}
    allowed_hamiltonians = {"XXZ", "XXZh", "XY", "fm_heisenberg_fm_Z", "fm_heisenberg_afm_Z"}

    def __init__(self, system, gamma, ksi, distance_dependent_offset):
        """
        constructor computes the field contribution per bond, sets member variables and populates the directed loop probability tables

        Args:
            system (System): object representing the system to be simulated
            gamma (float): offset added to weights to reduce bounces
            ksi (float): offset added in the presence of a large field to reduce bounces
            distance_dependent_offset (bool): determines whether a distance dependent offset should be used
        """

        super().__init__(system, gamma=gamma, ksi=ksi, distance_dependent_offset=distance_dependent_offset)

        self.gamma = gamma
        self.ksi = ksi
        self.distance_dependent_offset = distance_dependent_offset

        self.h_B = self.system.compute_h_B()

        # NOTE(review): unlike Deterministic.__init__, validate_system() is not called here --
        # confirm whether constructor-time validation was intended
        self.build()

    def validate_system(self):
        """
        validates the system associated with the instance of the ProbabilityTable object

        Raises:
            Exception: if, for the specified hamiltonian name, directed loop sampling can not be used
        """

        super().validate_system()

        # fixed: attribute is hamiltonian_parameters (plural), matching its use in build()
        # and in the base class / Deterministic
        hamiltonian_name = self.system.hamiltonian_parameters.hamiltonian_name

        if hamiltonian_name not in self.allowed_hamiltonians:
            raise Exception(
                "Inconsistent hamiltonian and sampling types. Directed loop probability tables "
                "only support the following types: {}".format(self.allowed_hamiltonians)
            )

    def build(self):
        """
        populates the directed loop probability tables

        Allocates the tables, sets up the vertex-to-weight-label mapping, and then
        computes the per-bond tables from the couplings and sampling parameters.
        """

        num_bonds = self.system.geometry.num_bonds

        self.initialize_tables(num_bonds)
        self.set_vertex_enum_transition_weights_map()

        J_ij_vector = self.system.interactions.J_ij_vector

        Delta = self.system.hamiltonian_parameters.Delta
        h_B = self.h_B

        gamma = self.gamma
        ksi = self.ksi
        distance_dependent_offset = self.distance_dependent_offset

        self.compute_prob_tables_directed_loops(
            num_bonds, J_ij_vector, gamma, h_B, Delta, ksi, distance_dependent_offset
        )

    def initialize_tables(self, num_bonds):
        """
        initializes the probability tables needed for directed loops sampling

        Args:
            num_bonds (int): number of interacting bonds in the lattice
        """

        # one row for every (vertex, leg-index) combination
        num_rows = vu.num_vertices * vu.num_legs_indices

        self.directed_loop_prob_table = np.zeros((num_rows, num_bonds))
        self.diag_prob_table = np.zeros((vu.num_diagonal_vertices, num_bonds))
        self.max_over_states = np.zeros(num_bonds)
        self.vertex_weights = np.zeros((vu.num_vertices, num_bonds))

        # accumulated across bonds in compute_prob_tables_directed_loops
        self.spectrum_offset = 0.0
        self.max_diag_norm = 0.0

    def set_vertex_enum_transition_weights_map(self):
        """
        builds the map from vertex enum to per-entrance-leg lists of transition-weight labels

        Each list has one label per exit leg; None marks an exit whose weight is zero.
        Only one entrance leg is stored when the others follow from the symmetries
        applied in update_directed_loop_probs.
        """

        self.vertex_weight_label_map = {}

        self.num_vertex_enums = 4
        self.directed_loop_vertex_enums = ["0", "1", "5", "3"]
        # number of unique entrance legs stored per vertex enum, in the same order
        self.vertex_enum_weight_list_counts = [1, 2, 2, 1]

        vertex_enum = "0"
        exit_leg_weights_le_0_v_0 = ["b_3", None, "c", "b"]  # l_e = 0
        self.vertex_weight_label_map[vertex_enum] = [exit_leg_weights_le_0_v_0]

        vertex_enum = "1"
        exit_leg_weights_le_0_v_1 = ["b_2_p", "a_p", "c_p", None]  # l_e = 0
        exit_leg_weights_le_1_v_1 = ["a", "b_2", None, "c"]  # l_e = 1
        self.vertex_weight_label_map[vertex_enum] = [exit_leg_weights_le_0_v_1, exit_leg_weights_le_1_v_1]

        vertex_enum = "5"
        exit_leg_weights_le_0_v_5 = ["b_1", "a", None, "b"]  # l_e = 0
        exit_leg_weights_le_1_v_5 = ["a_p", "b_1_p", "b_p", None]  # l_e = 1
        self.vertex_weight_label_map[vertex_enum] = [exit_leg_weights_le_0_v_5, exit_leg_weights_le_1_v_5]

        vertex_enum = "3"
        exit_leg_weights_le_0_v_3 = ["b_3_p", None, "c_p", "b_p"]  # l_e = 0
        self.vertex_weight_label_map[vertex_enum] = [exit_leg_weights_le_0_v_3]

    def update_directed_loop_probs(self, vertex_enum, l_e, l_x, bond, transition_weight):
        """
        sets the transition probability for one (vertex, entrance leg, exit leg, bond)
        entry and copies it to the three symmetry-related entries (vertical swap,
        horizontal swap, and composed swaps).
        """

        init_composite_leg_index, init_row_index = self.get_composite_row_prob_index(vertex_enum, l_e, l_x)
        normalization = self.vertex_weights[vertex_enum, bond]
        # a zero-weight vertex leaves the (zero-initialized) probability untouched
        if normalization != 0.0:
            self.directed_loop_prob_table[init_row_index, bond] = mu.set_probability(transition_weight / normalization)

        new_vertex_enum, new_l_e, new_l_x = self.get_symmetric_indices(vertex_enum, l_e, l_x, vu.vertical_swap_mapping)
        new_composite_leg_index, new_row_index = self.get_composite_row_prob_index(new_vertex_enum, new_l_e, new_l_x)
        self.directed_loop_prob_table[new_row_index, bond] = self.directed_loop_prob_table[init_row_index, bond]

        new_vertex_enum, new_l_e, new_l_x = self.get_symmetric_indices(
            vertex_enum, l_e, l_x, vu.horizontal_swap_mapping
        )
        new_composite_leg_index, new_row_index = self.get_composite_row_prob_index(new_vertex_enum, new_l_e, new_l_x)
        self.directed_loop_prob_table[new_row_index, bond] = self.directed_loop_prob_table[init_row_index, bond]

        new_vertex_enum, new_l_e, new_l_x = self.get_symmetric_indices(vertex_enum, l_e, l_x, vu.composed_swaps_mapping)
        new_composite_leg_index, new_row_index = self.get_composite_row_prob_index(new_vertex_enum, new_l_e, new_l_x)
        self.directed_loop_prob_table[new_row_index, bond] = self.directed_loop_prob_table[init_row_index, bond]

    def get_composite_row_prob_index(self, vertex_enum, entrance_leg_enum, exit_leg_enum):
        """
        returns (composite leg index, row index) of the probability-table entry for
        the given vertex, entrance leg and exit leg.
        """

        composite_leg_index = vu.num_legs_per_vertex * entrance_leg_enum + exit_leg_enum
        row_index = vu.num_legs_indices * vertex_enum + composite_leg_index

        return composite_leg_index, row_index

    def get_symmetric_indices(self, vertex_enum, entrance_leg_enum, exit_leg_enum, symmetry_leg_mapping):
        """
        applies a symmetry leg mapping to a vertex's leg spins and leg enums

        Returns:
            tuple: (mapped vertex enum, mapped entrance leg enum, mapped exit leg enum)
        """

        init_spin_tuple = vu.leg_spin[vertex_enum]
        # permute the four leg spins according to the symmetry mapping
        new_spin_tuple = (
            init_spin_tuple[symmetry_leg_mapping[0]],
            init_spin_tuple[symmetry_leg_mapping[1]],
            init_spin_tuple[symmetry_leg_mapping[2]],
            init_spin_tuple[symmetry_leg_mapping[3]],
        )
        new_vertex_enum = vu.vertex_map[new_spin_tuple]

        new_entrance_leg_enum = symmetry_leg_mapping[entrance_leg_enum]
        new_exit_leg_enum = symmetry_leg_mapping[exit_leg_enum]

        return new_vertex_enum, new_entrance_leg_enum, new_exit_leg_enum

    def update_directed_loop_table(self, bond, transition_weights):
        """
        fills the directed-loop table for one bond by iterating the configured
        vertex enums and their stored entrance legs; a None label means zero weight.
        """

        for i in range(self.num_vertex_enums):
            vertex_enum = self.directed_loop_vertex_enums[i]
            num_unique_entrance_legs = self.vertex_enum_weight_list_counts[i]

            for l_e in range(num_unique_entrance_legs):
                exit_leg_weight_labels = self.vertex_weight_label_map[vertex_enum][l_e]

                for l_x in range(vu.num_legs_per_vertex):
                    l_x_weight_label = exit_leg_weight_labels[l_x]
                    l_x_weight = transition_weights[l_x_weight_label] if l_x_weight_label is not None else 0.0

                    self.update_directed_loop_probs(int(vertex_enum), l_e, l_x, bond, l_x_weight)

    def compute_prob_tables_directed_loops(
        self, num_bonds, J_ij_vector, gamma, h_B, Delta, ksi, distance_dependent_offset
    ):
        """
        computes all per-bond tables: vertex weights, diagonal probabilities,
        directed-loop transition probabilities and the normalized max-over-states
        table, accumulating spectrum_offset and max_diag_norm along the way.
        """

        self.transition_weights_calculator = LoopTransitionWeights(gamma, Delta, h_B, ksi, distance_dependent_offset)

        for bond in range(num_bonds):
            J_ij = J_ij_vector[bond]

            vu.set_vertex_weights(self.vertex_weights, bond, Delta, J_ij, h_B)

            # diagonal vertices occupy the first four rows of the vertex weights
            self.diag_prob_table[:, bond] = self.vertex_weights[0:4, bond]

            self.transition_weights_calculator.compute_transition_weights(J_ij)

            transition_weights = self.transition_weights_calculator.transition_weight_container
            offset = self.transition_weights_calculator.offset_b

            # shift diagonal weights by the bond offset; the offsets sum to the spectrum offset
            self.vertex_weights[0:4, bond] += offset
            self.spectrum_offset += offset

            self.diag_prob_table[:, bond] += offset

            self.diag_prob_table = mu.enforce_positive(self.diag_prob_table, bond)
            self.vertex_weights = mu.enforce_positive(self.vertex_weights, bond)

            self.update_directed_loop_table(bond, transition_weights)

            # normalize the bond column by its max diagonal weight
            self.max_over_states[bond] = np.max(self.diag_prob_table[:, bond])
            self.diag_prob_table[:, bond] /= self.max_over_states[bond]
            self.max_diag_norm += self.max_over_states[bond]

        self.max_over_states[:] /= self.max_diag_norm

    def write_to_files(self, out_dir):
        """
        writes the probability tables to csv files for SSE engine

        Args:
            out_dir (str): directory path for writing probability tables
        """

        # base class creates out_dir/probability_densities/ and sets self.prob_dir
        super().write_to_files(out_dir)

        geometry_file_name = os.path.join(self.prob_dir, "geometry.csv")
        diag_file_name = os.path.join(self.prob_dir, "diag_probs.csv")
        max_over_states_file_name = os.path.join(self.prob_dir, "max_over_states.csv")
        loop_update_table_file_name = os.path.join(self.prob_dir, "off_diag_table.csv")
        vertex_weights_file_name = os.path.join(self.prob_dir, "vertex_weights.csv")

        geometry_table = self.system.geometry.geometry_table
        num_bonds = self.system.geometry.num_bonds
        np.savetxt(geometry_file_name, geometry_table, delimiter=",", fmt="%d", header="NumBonds={}".format(num_bonds))

        # the SSE engine reads the normalization and offset from the csv header
        header = "norm={},spectrum_offset={},loop_update_type={}".format(
            self.max_diag_norm, self.spectrum_offset, "DirectedLoops"
        )

        np.savetxt(diag_file_name, self.diag_prob_table, delimiter=",", header=header)
        np.savetxt(vertex_weights_file_name, self.vertex_weights, delimiter=",", header=header)
        np.savetxt(max_over_states_file_name, self.max_over_states, delimiter=",", header=header)
        np.savetxt(loop_update_table_file_name, self.directed_loop_prob_table, delimiter=",", header=header)
__init__

constructor computes the field contribution per bond, sets member variables and populates the directed loop probability tables

Parameters:

  • system (System) –

    object representing the system to be simulated

  • gamma (float) –

    offset added to weights to reduce bounces

  • ksi (float) –

    offset added in the presence of a large field to reduce bounces

  • distance_dependent_offset (bool) –

    determines whether a distance dependent offset should be used

Source code in src/oqd_heisenberg_ion/simulators/qmc/long_range/preprocess/probability_table/directed_loops.py
def __init__(self, system, gamma, ksi, distance_dependent_offset):
    """
    constructor computes the field contribution per bond, sets member variables and populates the directed loop probability tables

    Args:
        system (System): object representing the system to be simulated
        gamma (float): offset added to weights to reduce bounces
        ksi (float): offset added in the presence of a large field to reduce bounces
        distance_dependent_offset (bool): determines whether a distance dependent offset should be used
    """

    super().__init__(system, gamma=gamma, ksi=ksi, distance_dependent_offset=distance_dependent_offset)

    self.gamma = gamma
    self.ksi = ksi
    self.distance_dependent_offset = distance_dependent_offset

    self.h_B = self.system.compute_h_B()

    # NOTE(review): validate_system() is not invoked here -- confirm whether
    # constructor-time validation was intended, as it is in Deterministic
    self.build()
validate_system

validates the system associated with the instance of the ProbabilityTable object

Raises:

  • Exception

    if, for the specified hamiltonian name, directed loop sampling can not be used

Source code in src/oqd_heisenberg_ion/simulators/qmc/long_range/preprocess/probability_table/directed_loops.py
def validate_system(self):
    """
    validates the system associated with the instance of the ProbabilityTable object

    Raises:
        Exception: if, for the specified hamiltonian name, directed loop sampling can not be used
    """

    super().validate_system()

    # fixed: attribute is hamiltonian_parameters (plural), matching its use everywhere
    # else in this module (base class, Deterministic, and DirectedLoops.build)
    hamiltonian_name = self.system.hamiltonian_parameters.hamiltonian_name

    if hamiltonian_name not in self.allowed_hamiltonians:
        raise Exception(
            "Inconsistent hamiltonian and sampling types. Directed loop probability tables "
            "only support the following types: {}".format(self.allowed_hamiltonians)
        )
build

populates the directed loop probability tables

Source code in src/oqd_heisenberg_ion/simulators/qmc/long_range/preprocess/probability_table/directed_loops.py
def build(self):
    """
    populates the directed loop probability tables

    Allocates the tables, sets up the vertex-to-weight-label mapping, and then
    computes the per-bond tables from the couplings and sampling parameters.
    """

    num_bonds = self.system.geometry.num_bonds

    self.initialize_tables(num_bonds)
    self.set_vertex_enum_transition_weights_map()

    J_ij_vector = self.system.interactions.J_ij_vector

    # Delta comes from the hamiltonian parameters; h_B was computed in __init__
    Delta = self.system.hamiltonian_parameters.Delta
    h_B = self.h_B

    gamma = self.gamma
    ksi = self.ksi
    distance_dependent_offset = self.distance_dependent_offset

    self.compute_prob_tables_directed_loops(
        num_bonds, J_ij_vector, gamma, h_B, Delta, ksi, distance_dependent_offset
    )
initialize_tables

initializes the probability tables needed for directed loops sampling

Parameters:

  • num_bonds (int) –

    number of interacting bonds in the lattice

Source code in src/oqd_heisenberg_ion/simulators/qmc/long_range/preprocess/probability_table/directed_loops.py
def initialize_tables(self, num_bonds):
    """
    initializes the probability tables needed for directed loops sampling

    Args:
        num_bonds (int): number of interacting bonds in the lattice
    """

    # one row per (vertex, leg-index) combination; constants come from the vu module
    num_rows = vu.num_vertices * vu.num_legs_indices

    self.directed_loop_prob_table = np.zeros((num_rows, num_bonds))
    self.diag_prob_table = np.zeros((vu.num_diagonal_vertices, num_bonds))
    self.max_over_states = np.zeros(num_bonds)
    self.vertex_weights = np.zeros((vu.num_vertices, num_bonds))

    # accumulated across bonds in compute_prob_tables_directed_loops
    self.spectrum_offset = 0.0
    self.max_diag_norm = 0.0
write_to_files

writes the probability tables to csv files for SSE engine

Parameters:

  • out_dir (str) –

    directory path for writing probability tables

Source code in src/oqd_heisenberg_ion/simulators/qmc/long_range/preprocess/probability_table/directed_loops.py
def write_to_files(self, out_dir):
    """
    writes the probability tables to csv files for SSE engine

    Args:
        out_dir (str): directory path for writing probability tables
    """

    # base class creates out_dir/probability_densities/ and sets self.prob_dir
    super().write_to_files(out_dir)

    geometry_file_name = os.path.join(self.prob_dir, "geometry.csv")
    diag_file_name = os.path.join(self.prob_dir, "diag_probs.csv")
    max_over_states_file_name = os.path.join(self.prob_dir, "max_over_states.csv")
    loop_update_table_file_name = os.path.join(self.prob_dir, "off_diag_table.csv")
    vertex_weights_file_name = os.path.join(self.prob_dir, "vertex_weights.csv")

    geometry_table = self.system.geometry.geometry_table
    num_bonds = self.system.geometry.num_bonds
    # geometry is integer data; the bond count travels in the csv header
    np.savetxt(geometry_file_name, geometry_table, delimiter=",", fmt="%d", header="NumBonds={}".format(num_bonds))

    # the SSE engine reads the normalization and offset from the shared csv header
    header = "norm={},spectrum_offset={},loop_update_type={}".format(
        self.max_diag_norm, self.spectrum_offset, "DirectedLoops"
    )

    np.savetxt(diag_file_name, self.diag_prob_table, delimiter=",", header=header)
    np.savetxt(vertex_weights_file_name, self.vertex_weights, delimiter=",", header=header)
    np.savetxt(max_over_states_file_name, self.max_over_states, delimiter=",", header=header)
    np.savetxt(loop_update_table_file_name, self.directed_loop_prob_table, delimiter=",", header=header)
LoopTransitionWeights

contains the logic for computing the transition weights in the directed loop SSE method See https://journals.aps.org/pre/abstract/10.1103/PhysRevE.66.046701 for details

Source code in src/oqd_heisenberg_ion/simulators/qmc/long_range/preprocess/probability_table/directed_loops.py
class LoopTransitionWeights:
    """
    contains the logic for computing the transition weights in the directed loop SSE method
    See https://journals.aps.org/pre/abstract/10.1103/PhysRevE.66.046701 for details

    Results are stored in `transition_weight_container` under the keys
    a/b/c, their primed variants, and the bounce weights b_1..b_3 (and primed);
    see the referenced paper for the meaning of each weight.
    """

    def __init__(self, gamma, Delta, h_B, ksi, distance_dependent_offset):
        """
        sets the sampling parameters and allocates the weight container

        Args:
            gamma (float): constant offset added to the weights
            Delta (float): anisotropy parameter of the hamiltonian
            h_B (float): field contribution per bond; must be non-negative
            ksi (float): additive offset applied to non-zero bounce weights
            distance_dependent_offset (bool): if True, use a J_ij-dependent offset in the Delta <= -1, h_B == 0 branch

        Raises:
            Exception: if h_B is negative
        """

        keys = ["a", "b", "c", "a_p", "b_p", "c_p", "b_1", "b_2", "b_3", "b_1_p", "b_2_p", "b_3_p"]
        self.transition_weight_container = {key: None for key in keys}

        self.Delta = Delta
        self.h_B = h_B

        self.gamma = gamma
        self.ksi = ksi
        self.distance_dependent_offset = distance_dependent_offset

        if self.h_B < 0.0:
            raise Exception("h_B needs to be greater than or equal to 0")

    def populate_unprimed_transition_weights(self, a, b, c):
        """stores the unprimed transition weights a, b, c in the container"""

        self.transition_weight_container["a"] = a
        self.transition_weight_container["b"] = b
        self.transition_weight_container["c"] = c

    def populate_primed_transition_weights(self, a_p, b_p, c_p):
        """stores the primed transition weights a', b', c' in the container"""

        self.transition_weight_container["a_p"] = a_p
        self.transition_weight_container["b_p"] = b_p
        self.transition_weight_container["c_p"] = c_p

    def populate_unprimed_bounce_weights(self, b_1, b_2, b_3):
        """stores the unprimed bounce weights b_1, b_2, b_3 in the container"""

        self.transition_weight_container["b_1"] = b_1
        self.transition_weight_container["b_2"] = b_2
        self.transition_weight_container["b_3"] = b_3

    def populate_primed_bounce_weights(self, b_1_p, b_2_p, b_3_p):
        """stores the primed bounce weights b_1', b_2', b_3' in the container"""

        self.transition_weight_container["b_1_p"] = b_1_p
        self.transition_weight_container["b_2_p"] = b_2_p
        self.transition_weight_container["b_3_p"] = b_3_p

    def transition_weights_small_field(self, Delta_over_four_J_ij, Delta_positive, Delta_negative):
        """
        computes the weights in the small-field regime ((Delta / 4) * J_ij > h_B)

        Args:
            Delta_over_four_J_ij (float): (Delta / 4) * J_ij
            Delta_positive (float): ((Delta + 1) / 2) * J_ij
            Delta_negative (float): ((Delta - 1) / 2) * J_ij
        """

        self.offset_b = Delta_over_four_J_ij

        # b_3' is switched off (and epsilon enlarged) when the field dominates Delta_negative
        if self.h_B >= Delta_negative:
            b_3_p = 0.0
            epsilon = -Delta_negative / 2.0 + self.h_B / 2.0 + self.gamma
        else:
            b_3_p = Delta_negative - self.h_B + self.ksi
            epsilon = self.gamma

        if self.h_B <= -Delta_negative:
            b_3 = 0.0
        else:
            b_3 = Delta_negative + self.h_B + self.ksi

        a_p = -Delta_negative / 2.0 + self.h_B / 2.0 + b_3_p / 2.0
        b_p = Delta_positive / 2.0 - self.h_B / 2.0 - b_3_p / 2.0
        c_p = Delta_negative / 2.0 + epsilon - self.h_B / 2.0 - b_3_p / 2.0

        a = -Delta_negative / 2.0 - self.h_B / 2.0 + b_3 / 2.0
        b = Delta_positive / 2.0 + self.h_B / 2.0 - b_3 / 2.0
        c = epsilon + Delta_negative / 2.0 + self.h_B / 2.0 - b_3 / 2.0

        # only the b_3 bounces can be non-zero in this regime
        b_1_p = 0.0
        b_2_p = 0.0
        b_1 = 0.0
        b_2 = 0.0

        self.offset_b += epsilon
        self.epsilon = epsilon

        self.populate_unprimed_transition_weights(a, b, c)
        self.populate_primed_transition_weights(a_p, b_p, c_p)
        self.populate_unprimed_bounce_weights(b_1, b_2, b_3)
        self.populate_primed_bounce_weights(b_1_p, b_2_p, b_3_p)

    # backward-compatible alias: preserves the historical (misspelled) method name
    tranisiton_weights_small_field = transition_weights_small_field

    def transition_weights_negative_Delta(self, Delta_over_four_J_ij, Delta_positive, Delta_negative, J_ij):
        """
        computes the weights in the negative-Delta regime (Delta < 0 and (Delta / 4) * J_ij <= h_B)

        Args:
            Delta_over_four_J_ij (float): (Delta / 4) * J_ij
            Delta_positive (float): ((Delta + 1) / 2) * J_ij
            Delta_negative (float): ((Delta - 1) / 2) * J_ij
            J_ij (float): interaction strength of the bond
        """

        if self.h_B == 0.0:
            self.offset_b = -(self.Delta / 4.0) * J_ij
            if self.Delta <= -1.0:
                if self.distance_dependent_offset:
                    epsilon = self.gamma - (self.Delta / 10.0) * J_ij
                    c_p = self.gamma - (self.Delta / 10.0) * J_ij
                    # epsilon = gamma + 0.1/(r_b_pow_alpha)
                    # c_p = gamma + 0.1/(r_b_pow_alpha)
                else:
                    epsilon = self.gamma
                    c_p = self.gamma
                c = c_p
                a_p = (1.0 / 2.0) * J_ij
                b_p = 0.0
                a = a_p
                b = b_p
                b_2_p = -((1.0 + self.Delta) / 2.0) * J_ij
                b_2 = b_2_p
                b_3_p = 0.0
                b_1_p = 0.0
                b_1 = b_1_p
                b_3 = b_3_p
            else:
                b_2_p = 0.0
                b_p = ((1.0 + self.Delta) / (4.0)) * J_ij
                a_p = ((1.0 - self.Delta) / (4.0)) * J_ij
                c_p = self.gamma
                epsilon = ((1.0 + self.Delta) / (4.0)) * J_ij + self.gamma
                c = c_p
                a = a_p
                b = b_p
                b_1_p = 0.0
                b_3_p = 0.0
                b_2 = b_2_p
                b_1 = b_1_p
                b_3 = b_3_p
        else:
            self.offset_b = self.h_B - Delta_over_four_J_ij

            if self.h_B <= Delta_positive:
                b_2_p = 0.0
                epsilon = Delta_positive / 2.0 - self.h_B / 2.0 + self.gamma
            else:
                b_2_p = self.h_B - Delta_positive + self.ksi
                epsilon = self.gamma

            if self.h_B <= -Delta_positive:
                b_2 = -self.h_B - Delta_positive + self.ksi
            else:
                b_2 = 0.0

            if self.h_B <= -Delta_negative:
                b_3 = 0.0
            else:
                b_3 = self.h_B + Delta_negative + self.ksi

            a_p = -Delta_negative / 2.0 + self.h_B / 2.0 - b_2_p / 2.0
            b_p = Delta_positive / 2.0 - self.h_B / 2.0 + b_2_p / 2.0
            c_p = epsilon - Delta_positive / 2.0 + self.h_B / 2.0 - b_2_p / 2.0

            a = -Delta_negative / 2.0 - self.h_B / 2.0 + b_3 / 2.0 - b_2 / 2.0
            b = Delta_positive / 2.0 + self.h_B / 2.0 + b_2 / 2.0 - b_3 / 2.0
            c = 3.0 * self.h_B / 2.0 + epsilon - Delta_positive / 2.0 - b_2 / 2.0 - b_3 / 2.0

            b_1 = 0.0
            b_1_p = 0.0
            b_3_p = 0.0

        self.offset_b += epsilon
        self.epsilon = epsilon

        self.populate_unprimed_transition_weights(a, b, c)
        self.populate_primed_transition_weights(a_p, b_p, c_p)
        self.populate_unprimed_bounce_weights(b_1, b_2, b_3)
        self.populate_primed_bounce_weights(b_1_p, b_2_p, b_3_p)

    def transition_weights_large_field(self, Delta_positive, Delta_negative, J_ij):
        """
        computes the weights in the large-field regime (Delta >= 0 and (Delta / 4) * J_ij <= h_B)

        Args:
            Delta_positive (float): ((Delta + 1) / 2) * J_ij
            Delta_negative (float): ((Delta - 1) / 2) * J_ij
            J_ij (float): interaction strength of the bond
        """

        self.offset_b = self.h_B
        one_over_four_J_ij = (1.0 / 4.0) * J_ij

        if self.h_B <= Delta_negative:
            b_3_p = Delta_negative - self.h_B + self.ksi
            epsilon = self.gamma
        else:
            b_3_p = 0.0
            if self.h_B <= Delta_positive and self.h_B <= 2.0 * one_over_four_J_ij:
                epsilon = one_over_four_J_ij - self.h_B / 2.0 + self.gamma
            else:
                epsilon = self.gamma

        if self.h_B <= Delta_positive:
            b_2_p = 0.0
        else:
            b_2_p = self.h_B - Delta_positive + self.ksi

        if self.h_B < -Delta_negative:
            b_3 = 0
        else:
            b_3 = self.h_B + Delta_negative + self.ksi

        a_p = -Delta_negative / 2.0 + self.h_B / 2.0 + b_3_p / 2.0 - b_2_p / 2.0
        b_p = Delta_positive / 2.0 - self.h_B / 2.0 - b_3_p / 2.0 + b_2_p / 2.0
        c_p = epsilon - one_over_four_J_ij + self.h_B / 2.0 - b_3_p / 2.0 - b_2_p / 2.0

        a = -Delta_negative / 2.0 - self.h_B / 2.0 + b_3 / 2.0
        b = Delta_positive / 2.0 + self.h_B / 2.0 - b_3 / 2.0
        c = epsilon - one_over_four_J_ij + 3.0 * self.h_B / 2.0 - b_3 / 2.0

        b_1 = 0.0
        b_2 = 0.0
        b_1_p = 0.0

        self.offset_b += epsilon
        self.epsilon = epsilon

        self.populate_unprimed_transition_weights(a, b, c)
        self.populate_primed_transition_weights(a_p, b_p, c_p)
        self.populate_unprimed_bounce_weights(b_1, b_2, b_3)
        self.populate_primed_bounce_weights(b_1_p, b_2_p, b_3_p)

    def compute_transition_weights(self, J_ij):
        """
        computes and stores the transition weights for a bond with strength J_ij

        Selects the small-field, negative-Delta, or large-field regime and
        populates `transition_weight_container`, `offset_b` and `epsilon`.

        Args:
            J_ij (float): interaction strength of the bond
        """

        Delta_over_four_J_ij = (self.Delta / 4.0) * J_ij

        Delta_positive = ((self.Delta + 1.0) / (2.0)) * J_ij
        Delta_negative = ((self.Delta - 1.0) / (2.0)) * J_ij

        if Delta_over_four_J_ij > self.h_B:
            self.transition_weights_small_field(Delta_over_four_J_ij, Delta_positive, Delta_negative)
        elif self.Delta < 0.0:
            self.transition_weights_negative_Delta(Delta_over_four_J_ij, Delta_positive, Delta_negative, J_ij)
        else:
            self.transition_weights_large_field(Delta_positive, Delta_negative, J_ij)

factory

ProbabilityTableFactory

Factory for generating the required instance of the ProbabilityTable subclass. Carries a registry of ProbabilityTable subclasses

Source code in src/oqd_heisenberg_ion/simulators/qmc/long_range/preprocess/probability_table/factory.py
class ProbabilityTableFactory:
    """
    Factory for generating the required instance of the ProbabilityTable subclass. Carries a registry of ProbabilityTable subclasses
    """

    # maps a sampling-method name to its ProbabilityTable subclass
    registry = {}

    @classmethod
    def register(cls, name, subclass):
        """
        adds the specified subclass to the registry

        Note: declared as a classmethod (the methods take `cls` but previously
        lacked the decorator), so both `ProbabilityTableFactory.register(...)`
        and instance-level calls bind `cls` correctly.

        Args:
            name (str): name to be used for subclass
            subclass (Type[ProbabilityTable]): ProbabilityTable subclass to be registered
        """

        cls.registry[name] = subclass

    @classmethod
    def extract_args(cls, name, **kwargs):
        """
        extracts the arguments associated with a given subclass

        Args:
            name (str): subclass name (must exist in registry)
            **kwargs (dict): key word arguments. Must contain inputs for the specific subclass

        Returns:
            (dict): contains the subclass arguments as key value pairs
        """

        arg_vals = {}
        # coerce each declared argument with the dtype the subclass registered
        for key, arg_dtype in cls.registry[name].args.items():
            arg_vals[key] = arg_dtype(kwargs[key])

        return arg_vals

    @classmethod
    def create(cls, name, system, **kwargs):
        """
        creates an instance of the subclass specified

        Args:
            name (str): name of requested subclass

        Raises:
            Exception: if the requested ProbabilityTable is not found in the registry

        Returns:
            (ProbabilityTable): instance of the requested subclass
        """

        if name not in cls.registry:
            raise Exception(f"Probability Table implementation not found for name: {name}")
        else:
            return cls.registry[name](system, **kwargs)
register

adds the specified subclass to the registry

Parameters:

  • name (str) –

    name to be used for subclass

  • subclass (Type[ProbabilityTable]) –

    ProbabilityTable subclass to be registered

Source code in src/oqd_heisenberg_ion/simulators/qmc/long_range/preprocess/probability_table/factory.py
def register(cls, name, subclass):
    """
    adds the specified subclass to the registry

    Args:
        name (str): name to be used for the subclass
        subclass (Type[ProbabilityTable]): ProbabilityTable subclass to be registered
    """

    # later factory lookups resolve subclasses through this mapping
    cls.registry.update({name: subclass})
extract_args

extracts the arguments associated with a given subclass

Parameters:

  • name (str) –

    subclass name (must exist in registry)

  • **kwargs (dict, default: {} ) –

    key word arguments. Must contain inputs for the specific subclass

Returns:

  • dict

    contains the subclass arguments as key value pairs

Source code in src/oqd_heisenberg_ion/simulators/qmc/long_range/preprocess/probability_table/factory.py
def extract_args(cls, name, **kwargs):
    """
    extracts the arguments associated with a given subclass

    Args:
        name (str): subclass name (must exist in registry)
        **kwargs (dict): key word arguments. Must contain inputs for the specific subclass

    Returns:
        (dict): contains the subclass arguments as key value pairs
    """

    # coerce each declared argument with the dtype registered on the subclass
    declared = cls.registry[name].args
    return {key: arg_dtype(kwargs[key]) for key, arg_dtype in declared.items()}
create

creates an instance of the subclass specified

Parameters:

  • name (str) –

    name of requested subclass

Raises:

  • Exception

    if the requested ProbabilityTable is not found in the registry

Returns:

  • ProbabilityTable

    instance of the requested subclass

Source code in src/oqd_heisenberg_ion/simulators/qmc/long_range/preprocess/probability_table/factory.py
def create(cls, name, system, **kwargs):
    """
    creates an instance of the subclass specified

    Args:
        name (str): name of requested subclass

    Raises:
        Exception: if the requested ProbabilityTable is not found in the registry

    Returns:
        (ProbabilityTable): instance of the requested subclass
    """

    # guard clause: unknown names fail fast with an explicit message
    if name not in cls.registry:
        raise Exception(f"Probability Table implementation not found for name: {name}")

    return cls.registry[name](system, **kwargs)

heatbath

Heatbath

Bases: ProbabilityTable

ProbabilityTable subclass for heatbath sampling See: https://journals.aps.org/pre/abstract/10.1103/PhysRevE.66.046701 for details

Source code in src/oqd_heisenberg_ion/simulators/qmc/long_range/preprocess/probability_table/heatbath.py
class Heatbath(ProbabilityTable):
    """
    ProbabilityTable subclass for heatbath sampling
    See: https://journals.aps.org/pre/abstract/10.1103/PhysRevE.66.046701 for details
    """

    # constructor arguments (beyond `system`) and the callables used to coerce them
    args = {"gamma": float}
    # hamiltonian names for which heatbath probability tables may be built
    allowed_hamiltonians = {"XXZ", "XXZh", "XY", "fm_heisenberg_fm_Z", "fm_heisenberg_afm_Z"}

    def __init__(self, system, gamma):
        """
        constructor computes the field contribution per bond, sets member variables and populates the heatbath probability tables

        Args:
            system (System): object representing the system to be simulated
            gamma (float): offset added to weights to reduce bounces
        """

        super().__init__(system, gamma=gamma)

        self.gamma = gamma

        # per-bond field contribution; non-negativity is enforced in compute_offset
        self.h_B = self.system.compute_h_B()

        # fail fast on unsupported hamiltonians before building any tables
        self.validate_system()

        self.build()

    def validate_system(self):
        """
        validates the system associated with the instance of the ProbabilityTable object

        Raises:
            Exception: if, for the specified hamiltonian name, heatbath sampling can not be used
        """

        super().validate_system()

        hamiltonian_name = self.system.hamiltonian_parameters.hamiltonian_name

        if hamiltonian_name not in self.allowed_hamiltonians:
            raise Exception(
                "Inconsistent hamiltonian and sampling types. Heatbath probability tables "
                "only support the following types: {}".format(self.allowed_hamiltonians)
            )

    def build(self):
        """
        populates the heatbath probability tables

        Returns:
            (int): 0 on success
        """

        num_bonds = self.system.geometry.num_bonds
        J_ij_vector = self.system.interactions.J_ij_vector
        gamma = self.gamma
        h_B = self.h_B
        Delta = self.system.hamiltonian_parameters.Delta

        # allocate zeroed tables, then fill them bond by bond
        self.initialize_tables(num_bonds)
        self.compute_prob_tables_heat_bath(num_bonds, J_ij_vector, gamma, h_B, Delta)

        return 0

    def initialize_tables(self, num_bonds):
        """
        initializes the probability tables needed for heatbath sampling

        Args:
            num_bonds (int): number of interacting bonds in the lattice
        """

        # one row per (vertex, entrance/exit leg index) combination
        self.num_rows = vu.num_vertices * vu.num_legs_indices
        self.heat_bath_prob_table = np.zeros((self.num_rows, num_bonds))

        self.diag_prob_table = np.zeros((vu.num_diagonal_vertices, num_bonds))
        self.max_over_states = np.zeros(num_bonds)
        self.vertex_weights = np.zeros((vu.num_vertices, num_bonds))

        # scalar accumulators filled while the tables are populated
        self.spectrum_offset = 0.0
        self.max_diag_norm = 0.0

        return 0

    # Helper function used by compute_prob_tables_heat_bath
    def compute_offset(self, gamma, Delta, J_ij, h_B):
        """
        computes the constant offset added to the diagonal weights of one bond

        The branch structure mirrors the regime selection used for the directed
        loop weights: small field, negative Delta, and large field.

        Raises:
            Exception: if h_B is negative

        Returns:
            (float): offset for this bond (includes gamma)
        """

        if h_B < 0.0:
            raise Exception("h_B needs to be greater than or equal to 0")
        else:
            Delta_over_four_J_ij = (Delta / 4.0) * J_ij

            if Delta_over_four_J_ij > h_B:
                offset_b = Delta_over_four_J_ij

            elif Delta < 0.0:
                offset_b = h_B - Delta_over_four_J_ij

            else:
                offset_b = h_B

            offset_b += gamma

            return offset_b

    def update_heat_bath_probs(self, bond):
        """
        fills the heatbath probability rows of a single bond

        For each (vertex, entrance leg) pair, every candidate exit leg gets a
        probability derived from the weight of the resulting vertex (via
        mu.set_probability); transitions with no valid resulting vertex get 0.
        Each group of num_legs_per_vertex rows is then divided by the summed
        weight of its valid transitions.
        """

        for vertex_enum in range(vu.num_vertices):
            for l_e in range(vu.num_legs_per_vertex):
                norm = 0.0
                count_invalid_vertices = 0

                for l_x in range(vu.num_legs_per_vertex):
                    composite_leg_index = vu.num_legs_per_vertex * l_e + l_x
                    row_index = vu.num_legs_indices * vertex_enum + composite_leg_index

                    # new_vertex = get_new_vertex(v_map, l_spin, l_e, l_x, vertex_enum)
                    new_vertex = vu.new_vertex_map[vertex_enum, composite_leg_index]

                    # -1 marks a forbidden transition in the vertex map
                    if new_vertex == -1:
                        count_invalid_vertices += 1
                        self.heat_bath_prob_table[row_index, bond] = 0.0
                    else:
                        self.heat_bath_prob_table[row_index, bond] = mu.set_probability(
                            self.vertex_weights[new_vertex, bond]
                        )
                        norm += self.vertex_weights[new_vertex, bond]

                # row_index is the last row written for this (vertex, entrance leg)
                # group, so this slice normalizes exactly the rows filled above.
                # NOTE(review): if every exit is invalid, norm stays 0 and this
                # divides by zero — confirm vu.new_vertex_map guarantees at least
                # one valid transition per group
                self.heat_bath_prob_table[row_index - vu.num_legs_per_vertex + 1 : row_index + 1, bond] /= norm

        return 0

    # Generating this table might be slow for large systems because size grows as N^2.
    # Simpler but slightly slower approach would be to:
    # compute non-zero heat bath probabilities on the fly
    def compute_prob_tables_heat_bath(self, num_bonds, J_ij_vector, gamma, h_B, Delta):
        """
        populates the diagonal, vertex-weight and heatbath tables for every bond

        Args:
            num_bonds (int): number of interacting bonds in the lattice
            J_ij_vector (ndarray): per-bond interaction strengths
            gamma (float): offset added to weights to reduce bounces
            h_B (float): field contribution per bond
            Delta (float): anisotropy parameter of the hamiltonian
        """

        for bond in range(num_bonds):
            J_ij = J_ij_vector[bond]

            # bare vertex weights for this bond are written in place by the vertex utilities
            vu.set_vertex_weights(self.vertex_weights, bond, Delta, J_ij, h_B)

            # rows 0:4 of the vertex-weight table are copied as the diagonal weights
            self.diag_prob_table[:, bond] = self.vertex_weights[0:4, bond]

            offset = self.compute_offset(gamma, Delta, J_ij, h_B)

            # shift the diagonal weights by the bond offset and record the total shift
            self.vertex_weights[0:4, bond] += offset
            self.spectrum_offset += offset

            self.diag_prob_table[:, bond] += offset

            # presumably clamps negative entries for this bond — see mu.enforce_positive
            self.diag_prob_table = mu.enforce_positive(self.diag_prob_table, bond)
            self.vertex_weights = mu.enforce_positive(self.vertex_weights, bond)

            # normalize this bond's diagonal probabilities by their maximum
            self.max_over_states[bond] = np.max(self.diag_prob_table[:, bond])
            self.diag_prob_table[:, bond] /= self.max_over_states[bond]
            self.max_diag_norm += self.max_over_states[bond]

            self.update_heat_bath_probs(bond)

        # turn the per-bond maxima into a distribution over bonds (entries sum to 1)
        self.max_over_states[:] /= self.max_diag_norm

        return 0

    def write_to_files(self, out_dir):
        """
        writes the probability tables to csv files for SSE engine

        Args:
            out_dir (str): directory path for writing probability tables
        """

        # base class makes the output directories; self.prob_dir is presumably set there
        super().write_to_files(out_dir)

        geometry_file_name = os.path.join(self.prob_dir, "geometry.csv")
        diag_file_name = os.path.join(self.prob_dir, "diag_probs.csv")
        max_over_states_file_name = os.path.join(self.prob_dir, "max_over_states.csv")
        loop_update_table_file_name = os.path.join(self.prob_dir, "off_diag_table.csv")
        vertex_weights_file_name = os.path.join(self.prob_dir, "vertex_weights.csv")

        geometry_table = self.system.geometry.geometry_table
        num_bonds = self.system.geometry.num_bonds
        np.savetxt(geometry_file_name, geometry_table, delimiter=",", fmt="%d", header="NumBonds={}".format(num_bonds))

        # shared header records normalization metadata consumed by the SSE engine
        header = "norm={},spectrum_offset={},loop_update_type={}".format(
            self.max_diag_norm, self.spectrum_offset, "heatbath"
        )

        np.savetxt(diag_file_name, self.diag_prob_table, delimiter=",", header=header)
        np.savetxt(vertex_weights_file_name, self.vertex_weights, delimiter=",", header=header)
        np.savetxt(max_over_states_file_name, self.max_over_states, delimiter=",", header=header)
        np.savetxt(loop_update_table_file_name, self.heat_bath_prob_table, delimiter=",", header=header)

        return 0
__init__

constructor computes the field contribution per bond, sets member variables and populates the heatbath probability tables

Parameters:

  • system (System) –

    object representing the system to be simulated

  • gamma (float) –

    offset added to weights to reduce bounces

Source code in src/oqd_heisenberg_ion/simulators/qmc/long_range/preprocess/probability_table/heatbath.py
def __init__(self, system, gamma):
    """
    constructor computes the field contribution per bond, sets member variables and populates the heatbath probability tables

    Args:
        system (System): object representing the system to be simulated
        gamma (float): offset added to weights to reduce bounces
    """

    # base constructor builds the geometry and records the sampling parameters
    super().__init__(system, gamma=gamma)

    self.gamma = gamma
    # per-bond field contribution; validated downstream in compute_offset
    self.h_B = self.system.compute_h_B()

    # fail fast on unsupported hamiltonians before building any tables
    self.validate_system()
    self.build()
validate_system

validates the system associated with the instance of the ProbabilityTable object

Raises:

  • Exception

    if, for the specified hamiltonian name, heatbath sampling can not be used

Source code in src/oqd_heisenberg_ion/simulators/qmc/long_range/preprocess/probability_table/heatbath.py
def validate_system(self):
    """
    validates the system associated with the instance of the ProbabilityTable object

    Raises:
        Exception: if, for the specified hamiltonian name, heatbath sampling can not be used
    """

    # base-class checks (long-range geometry, positive J) run first
    super().validate_system()

    name = self.system.hamiltonian_parameters.hamiltonian_name
    if name in self.allowed_hamiltonians:
        return

    raise Exception(
        "Inconsistent hamiltonian and sampling types. Heatbath probability tables "
        "only support the following types: {}".format(self.allowed_hamiltonians)
    )
build

populates the heatbath probability tables

Source code in src/oqd_heisenberg_ion/simulators/qmc/long_range/preprocess/probability_table/heatbath.py
def build(self):
    """
    populates the heatbath probability tables

    Returns:
        (int): 0 on success
    """

    num_bonds = self.system.geometry.num_bonds

    # allocate zeroed tables, then fill them bond by bond
    self.initialize_tables(num_bonds)
    self.compute_prob_tables_heat_bath(
        num_bonds,
        self.system.interactions.J_ij_vector,
        self.gamma,
        self.h_B,
        self.system.hamiltonian_parameters.Delta,
    )

    return 0
initialize_tables

initializes the probability tables needed for heatbath sampling

Parameters:

  • num_bonds (int) –

    number of interacting bonds in the lattice

Source code in src/oqd_heisenberg_ion/simulators/qmc/long_range/preprocess/probability_table/heatbath.py
def initialize_tables(self, num_bonds):
    """
    initializes the probability tables needed for heatbath sampling

    Args:
        num_bonds (int): number of interacting bonds in the lattice
    """

    # one row per (vertex, entrance/exit leg index) combination
    rows = vu.num_vertices * vu.num_legs_indices
    self.num_rows = rows
    self.heat_bath_prob_table = np.zeros((rows, num_bonds))

    self.diag_prob_table = np.zeros((vu.num_diagonal_vertices, num_bonds))
    self.vertex_weights = np.zeros((vu.num_vertices, num_bonds))
    self.max_over_states = np.zeros(num_bonds)

    # scalar accumulators filled while the tables are populated
    self.spectrum_offset = 0.0
    self.max_diag_norm = 0.0

    return 0
write_to_files

writes the probability tables to csv files for SSE engine

Parameters:

  • out_dir (str) –

    directory path for writing probability tables

Source code in src/oqd_heisenberg_ion/simulators/qmc/long_range/preprocess/probability_table/heatbath.py
def write_to_files(self, out_dir):
    """
    writes the probability tables to csv files for the SSE engine

    Args:
        out_dir (str): directory path for writing probability tables
    """

    # base class makes the output directories; self.prob_dir is presumably set there
    super().write_to_files(out_dir)

    # geometry is integer data and carries its own bond-count header
    np.savetxt(
        os.path.join(self.prob_dir, "geometry.csv"),
        self.system.geometry.geometry_table,
        delimiter=",",
        fmt="%d",
        header="NumBonds={}".format(self.system.geometry.num_bonds),
    )

    # remaining tables share one header recording normalization metadata
    header = "norm={},spectrum_offset={},loop_update_type={}".format(
        self.max_diag_norm, self.spectrum_offset, "heatbath"
    )

    tables = [
        ("diag_probs.csv", self.diag_prob_table),
        ("vertex_weights.csv", self.vertex_weights),
        ("max_over_states.csv", self.max_over_states),
        ("off_diag_table.csv", self.heat_bath_prob_table),
    ]
    for file_name, table in tables:
        np.savetxt(os.path.join(self.prob_dir, file_name), table, delimiter=",", header=header)

    return 0