ExaTN
Classes | Typedefs | Enumerations | Functions | Variables
exatn Namespace Reference

Classes

class  Cloneable
 
class  Identifiable
 
struct  IndexLabel
 
class  NumServer
 
class  ServiceRegistry
 

Typedefs

using TensorMethod = talsh::TensorFunctor< Identifiable >
 
using Int4 = int32_t
 
using Int8 = int64_t
 
using UInt4 = uint32_t
 
using UInt8 = uint64_t
 
using SpaceId = unsigned int
 
using SubspaceId = unsigned long long int
 
using SymmetryId = int
 
using DimExtent = unsigned long long int
 
using DimOffset = unsigned long long int
 
using ScopeId = unsigned int
 

Enumerations

enum  LegDirection { UNDIRECT, INWARD, OUTWARD }
 
enum  TensorOpCode {
  NOOP, CREATE, DESTROY, TRANSFORM,
  ADD, CONTRACT
}
 
enum  TensorElementType {
  VOID, REAL16, REAL32, REAL64,
  COMPLEX16, COMPLEX32, COMPLEX64
}
 

Functions

void initialize ()
 
bool isInitialized ()
 
void finalize ()
 
ScopeId openScope (const std::string &scope_name)
 
ScopeId closeScope ()
 
SpaceId createVectorSpace (const std::string &space_name, DimExtent space_dim, const VectorSpace **space_ptr=nullptr)
 
void destroyVectorSpace (const std::string &space_name)
 
void destroyVectorSpace (SpaceId space_id)
 
SubspaceId createSubspace (const std::string &subspace_name, const std::string &space_name, const std::pair< DimOffset, DimOffset > bounds, const Subspace **subspace_ptr=nullptr)
 
void destroySubspace (const std::string &subspace_name)
 
void destroySubspace (SubspaceId subspace_id)
 
const Subspace * getSubspace (const std::string &subspace_name)
 
void registerTensorMethod (const std::string &tag, std::shared_ptr< TensorMethod > method)
 
std::shared_ptr< TensorMethod > getTensorMethod (const std::string &tag)
 
void registerExternalData (const std::string &tag, std::shared_ptr< BytePacket > packet)
 
std::shared_ptr< BytePacket > getExternalData (const std::string &tag)
 
template<typename... Args>
bool createTensor (const std::string &name, TensorElementType element_type, Args &&... args)
 
template<typename... Args>
bool createTensorSync (const std::string &name, TensorElementType element_type, Args &&... args)
 
Tensor & getTensorRef (const std::string &name)
 
TensorElementType getTensorElementType (const std::string &name)
 
bool destroyTensor (const std::string &name)
 
bool destroyTensorSync (const std::string &name)
 
template<typename NumericType >
bool initTensor (const std::string &name, NumericType value)
 
template<typename NumericType >
bool initTensorSync (const std::string &name, NumericType value)
 
bool transformTensor (const std::string &name, std::shared_ptr< TensorMethod > functor)
 
bool transformTensorSync (const std::string &name, std::shared_ptr< TensorMethod > functor)
 
template<typename NumericType >
bool addTensors (const std::string &addition, NumericType alpha)
 
template<typename NumericType >
bool addTensorsSync (const std::string &addition, NumericType alpha)
 
template<typename NumericType >
bool contractTensors (const std::string &contraction, NumericType alpha)
 
template<typename NumericType >
bool contractTensorsSync (const std::string &contraction, NumericType alpha)
 
bool evaluateTensorNetwork (const std::string &name, const std::string &network)
 
bool evaluateTensorNetworkSync (const std::string &name, const std::string &network)
 
bool sync (const std::string &name, bool wait=true)
 
std::shared_ptr< talsh::Tensor > getLocalTensor (std::shared_ptr< Tensor > tensor, const std::vector< std::pair< DimOffset, DimExtent >> &slice_spec)
 
std::shared_ptr< talsh::Tensor > getLocalTensor (const std::string &name, const std::vector< std::pair< DimOffset, DimExtent >> &slice_spec)
 
std::shared_ptr< talsh::Tensor > getLocalTensor (const std::string &name)
 
void resetRuntimeLoggingLevel (int level=0)
 
template<typename Service >
std::shared_ptr< Service > getService (const std::string &serviceName)
 
template<typename Service >
bool hasService (const std::string &serviceName)
 
template<typename... Args>
std::shared_ptr< numerics::Tensor > makeSharedTensor (Args &&... args)
 
template<typename... Args>
std::shared_ptr< numerics::TensorNetwork > makeSharedTensorNetwork (Args &&... args)
 
bool parse_tensor (const std::string &tensor, std::string &tensor_name, std::vector< IndexLabel > &indices, bool &complex_conjugated)
 
bool parse_tensor_network (const std::string &network, std::vector< std::string > &tensors)
 
bool generate_contraction_pattern (const std::vector< numerics::TensorLeg > &pattern, unsigned int left_tensor_rank, unsigned int right_tensor_rank, std::string &symb_pattern)
 
bool is_letter (const char &ch)
 
bool is_number (const char &ch)
 
bool is_underscore (const char &ch)
 
bool is_space (const char &ch)
 
bool is_conjugation_sign (const char &ch)
 
bool is_equal_sign (const char &ch)
 
bool is_plus_sign (const char &ch)
 
bool is_minus_sign (const char &ch)
 
bool is_multiply_sign (const char &ch)
 
bool is_alphanumeric (const std::string &identifier)
 
std::pair< int, int > trim_spaces_off (const std::string &str, std::pair< int, int > view)
 

Variables

bool exatnFrameworkInitialized = false
 
std::shared_ptr< ServiceRegistry > serviceRegistry = std::make_shared<ServiceRegistry>()
 
std::shared_ptr< NumServer > numericalServer {nullptr}
 
constexpr DimExtent MAX_SPACE_DIM = 0xFFFFFFFFFFFFFFFF
 
constexpr SpaceId SOME_SPACE = 0
 
constexpr SubspaceId FULL_SUBSPACE = 0
 
constexpr SubspaceId UNREG_SUBSPACE = 0xFFFFFFFFFFFFFFFF
 

Detailed Description

ExaTN::Numerics: General client header REVISION: 2019/10/13

Copyright (C) 2018-2019 Dmitry I. Lyakh (Liakh) Copyright (C) 2018-2019 Oak Ridge National Laboratory (UT-Battelle) Rationale: 1. Vector space and subspace registration: (a) Any unnamed vector space is automatically associated with a preregistered anonymous vector space with id = SOME_SPACE = 0. (b) Any explicitly registered (named) vector space has id > 0. (c) Any unregistered subspace of any named vector space has id = UNREG_SUBSPACE = max(uint64_t). (d) Every explicitly registered (named) vector space has an automatically registered full subspace (=space) under the same (space) name with id = FULL_SUBSPACE = 0. (e) Every registered non-trivial named subspace of any named vector space has id: 0 < id < max(uint64_t). (f) A subspace of the anonymous vector space is defined by the base offset (first basis vector belonging to it) and its dimension. 2. Index labels: (a) Any registered subspace can be assigned a symbolic index label serving as a placeholder for it; any index label can only refer to a single registered (named) subspace it is associated with.

ExaTN::Numerics: Numerical server REVISION: 2019/10/13

Copyright (C) 2018-2019 Dmitry I. Lyakh (Liakh) Copyright (C) 2018-2019 Oak Ridge National Laboratory (UT-Battelle)

ExaTN::Numerics: Basis Vector REVISION: 2019/03/17

Copyright (C) 2018-2019 Dmitry I. Lyakh (Liakh) Copyright (C) 2018-2019 Oak Ridge National Laboratory (UT-Battelle)

ExaTN::Numerics: Basis Vector REVISION: 2019/05/27

Copyright (C) 2018-2019 Dmitry I. Lyakh (Liakh) Copyright (C) 2018-2019 Oak Ridge National Laboratory (UT-Battelle) Rationale: (a) Any basis vector spans a primitive 1-dimensional subspace; (b) Any space/subspace can be composed of linear-independent 1-dimensional subspaces by taking a direct sum of them. (c) Any abstract basis vector can further be specialized/concretized by introducing additional attributes peculiar to a specific basis kind.

ExaTN::Numerics: Tensor contraction sequence optimizer REVISION: 2019/10/01

Copyright (C) 2018-2019 Dmitry I. Lyakh (Liakh) Copyright (C) 2018-2019 Oak Ridge National Laboratory (UT-Battelle) Rationale:

ExaTN::Numerics: Tensor contraction sequence optimizer: Dummy REVISION: 2019/09/09

Copyright (C) 2018-2019 Dmitry I. Lyakh (Liakh) Copyright (C) 2018-2019 Oak Ridge National Laboratory (UT-Battelle)

ExaTN::Numerics: Tensor contraction sequence optimizer: Dummy REVISION: 2019/09/09

Copyright (C) 2018-2019 Dmitry I. Lyakh (Liakh) Copyright (C) 2018-2019 Oak Ridge National Laboratory (UT-Battelle) Rationale:

ExaTN::Numerics: Tensor contraction sequence optimizer factory REVISION: 2019/09/10

Copyright (C) 2018-2019 Dmitry I. Lyakh (Liakh) Copyright (C) 2018-2019 Oak Ridge National Laboratory (UT-Battelle)

ExaTN::Numerics: Tensor contraction sequence optimizer factory REVISION: 2019/09/10

Copyright (C) 2018-2019 Dmitry I. Lyakh (Liakh) Copyright (C) 2018-2019 Oak Ridge National Laboratory (UT-Battelle) Rationale: (a) Creates tensor contraction sequence optimizers of desired kind.

ExaTN::Numerics: Tensor contraction sequence optimizer: Heuristics REVISION: 2019/10/02

Copyright (C) 2018-2019 Dmitry I. Lyakh (Liakh) Copyright (C) 2018-2019 Oak Ridge National Laboratory (UT-Battelle)

ExaTN::Numerics: Tensor contraction sequence optimizer: Heuristics REVISION: 2019/10/01

Copyright (C) 2018-2019 Dmitry I. Lyakh (Liakh) Copyright (C) 2018-2019 Oak Ridge National Laboratory (UT-Battelle) Rationale:

ExaTN::Numerics: Tensor Functor: Initialization to a scalar value REVISION: 2019/09/20

Copyright (C) 2018-2019 Dmitry I. Lyakh (Liakh) Copyright (C) 2018-2019 Oak Ridge National Laboratory (UT-Battelle)

ExaTN::Numerics: Tensor Functor: Initialization to a scalar value REVISION: 2019/09/20

Copyright (C) 2018-2019 Dmitry I. Lyakh (Liakh) Copyright (C) 2018-2019 Oak Ridge National Laboratory (UT-Battelle) Rationale: (A) This tensor functor (method) is used to initialize a Tensor to a scalar value, with the default of zero.

ExaTN::Numerics: Tensor network builder factory REVISION: 2019/11/01

Copyright (C) 2018-2019 Dmitry I. Lyakh (Liakh) Copyright (C) 2018-2019 Oak Ridge National Laboratory (UT-Battelle)

ExaTN::Numerics: Tensor network builder factory REVISION: 2019/11/01

Copyright (C) 2018-2019 Dmitry I. Lyakh (Liakh) Copyright (C) 2018-2019 Oak Ridge National Laboratory (UT-Battelle) Rationale: (a) Creates tensor network builders of desired kind.

ExaTN::Numerics: Tensor network builder REVISION: 2019/11/01

Copyright (C) 2018-2019 Dmitry I. Lyakh (Liakh) Copyright (C) 2018-2019 Oak Ridge National Laboratory (UT-Battelle) Rationale: (a) A tensor network builder allows building complex tensor networks of a specific kind.

ExaTN::Numerics: Tensor network builder: MPS: Matrix Product State REVISION: 2019/11/05

Copyright (C) 2018-2019 Dmitry I. Lyakh (Liakh) Copyright (C) 2018-2019 Oak Ridge National Laboratory (UT-Battelle)

ExaTN::Numerics: Tensor network builder: MPS: Matrix Product State REVISION: 2019/11/01

Copyright (C) 2018-2019 Dmitry I. Lyakh (Liakh) Copyright (C) 2018-2019 Oak Ridge National Laboratory (UT-Battelle) Rationale:

ExaTN::Numerics: Tensor network builder: Tree: Tree Tensor Network REVISION: 2019/11/01

Copyright (C) 2018-2019 Dmitry I. Lyakh (Liakh) Copyright (C) 2018-2019 Oak Ridge National Laboratory (UT-Battelle)

ExaTN::Numerics: Tensor network builder: Tree: Tree Tensor Network REVISION: 2019/11/01

Copyright (C) 2018-2019 Dmitry I. Lyakh (Liakh) Copyright (C) 2018-2019 Oak Ridge National Laboratory (UT-Battelle) Rationale:

ExaTN::Numerics: Space Basis REVISION: 2019/03/18

Copyright (C) 2018-2019 Dmitry I. Lyakh (Liakh) Copyright (C) 2018-2019 Oak Ridge National Laboratory (UT-Battelle)

ExaTN::Numerics: Space Basis REVISION: 2019/05/27

Copyright (C) 2018-2019 Dmitry I. Lyakh (Liakh) Copyright (C) 2018-2019 Oak Ridge National Laboratory (UT-Battelle) Rationale: (a) Space basis is a set of linear-independent basis vectors. (b) An abstract space basis can further be specialized by storing specialized basis vectors with additional attributes. By default, an abstract space basis is only characterized by its dimension. (c) Space basis may additionally include symmetry subranges, that is, contiguous ranges of basis vectors assigned a specific symmetry id.

ExaTN::Numerics: Register of vector spaces and their subspaces REVISION: 2019/06/06

Copyright (C) 2018-2019 Dmitry I. Lyakh (Liakh) Copyright (C) 2018-2019 Oak Ridge National Laboratory (UT-Battelle)

ExaTN::Numerics: Register of vector spaces and their subspaces REVISION: 2019/06/06

Copyright (C) 2018-2019 Dmitry I. Lyakh (Liakh) Copyright (C) 2018-2019 Oak Ridge National Laboratory (UT-Battelle) Rationale: (a) Any unregistered vector space has id = SOME_SPACE = 0 (anonymous vector space). A subspace of the anonymous vector space is defined by the base offset (first basis vector) and its dimension. (b) Any explicitly registered (named) vector space has id > 0. (c) Any unregistered subspace of any registered vector space has id = UNREG_SUBSPACE = max(uint64_t). (d) Every named vector space has an automatically registered full subspace under the same (space) name with id = FULL_SUBSPACE = 0 (trivial subspace which spans the full space). (e) Every registered non-trivial subspace of any registered vector space has id: 0 < id < max(uint64_t).

ExaTN::Numerics: Spaces/Subspaces REVISION: 2019/07/07

Copyright (C) 2018-2019 Dmitry I. Lyakh (Liakh) Copyright (C) 2018-2019 Oak Ridge National Laboratory (UT-Battelle)

ExaTN::Numerics: Spaces/Subspaces REVISION: 2019/06/06

Copyright (C) 2018-2019 Dmitry I. Lyakh (Liakh) Copyright (C) 2018-2019 Oak Ridge National Laboratory (UT-Battelle) Rationale: (a) An abstract vector space is defined by its dimension, N, making it a linear span of its N abstract basis vectors. Additionally, symmetry subranges can be defined within the space basis, that is, contiguous subranges of basis vectors can be assigned a specific symmetry id. (b) A specialized vector space is a span of linear-independent specialized basis vectors (specialized basis). (c) A subspace of a vector space is defined by its encompassing vector space and a range of basis vectors it is spanned over.

ExaTN::Numerics: Tensor REVISION: 2019/10/21

Copyright (C) 2018-2019 Dmitry I. Lyakh (Liakh) Copyright (C) 2018-2019 Oak Ridge National Laboratory (UT-Battelle)

ExaTN::Numerics: Abstract Tensor REVISION: 2019/10/21

Copyright (C) 2018-2019 Dmitry I. Lyakh (Liakh) Copyright (C) 2018-2019 Oak Ridge National Laboratory (UT-Battelle) NOTES: Tensor specification requires: (a) Symbolic tensor name; (b) Tensor rank (number of tensor dimensions) and tensor shape (extents of all tensor dimensions); (c) Optional tensor signature (space/subspace identifier for all tensor dimensions). (d) Optional tensor element type (exatn::TensorElementType). (e) Optional isometries: An isometry is a group of tensor dimensions a contraction over which with the conjugated tensor results in a delta function over the remaining tensor dimensions, split between the original and conjugated tensors. A tensor is isometric if it has at least one isometry group of dimensions. A tensor is unitary if its dimensions can be partitioned into two non-overlapping groups such that both groups form an isometry.

Tensor signature identifies a full tensor or its slice. Tensor signature requires providing a pair<SpaceId,SubspaceId> for each tensor dimension. It has two alternative specifications: (a) SpaceId = SOME_SPACE: In this case, SubspaceId is the lower bound of the specified tensor slice (0 is the min lower bound). The upper bound is computed by adding the dimension extent to the lower bound - 1. The defining vector space (SOME_SPACE) is an abstract anonymous vector space. (b) SpaceId != SOME_SPACE: In this case, SpaceId refers to a registered vector space and the SubspaceId refers to a registered subspace of this vector space. The subspaces will carry lower/upper bounds of the specified tensor slice. SubspaceId = 0 refers to the full space, which is automatically registered when the space is registered. Although tensor dimension extents cannot exceed the dimensions of the corresponding registered subspaces from the tensor signature, they in general can be smaller than the latter (low-rank representation).

ExaTN: Tensor basic types and parameters REVISION: 2019/09/01

Copyright (C) 2018-2019 Dmitry I. Lyakh (Liakh) Copyright (C) 2018-2019 Oak Ridge National Laboratory (UT-Battelle)

ExaTN::Numerics: Tensor connected to other tensors inside a tensor network REVISION: 2019/10/16

Copyright (C) 2018-2019 Dmitry I. Lyakh (Liakh) Copyright (C) 2018-2019 Oak Ridge National Laboratory (UT-Battelle)

ExaTN::Numerics: Tensor connected to other tensors in a tensor network REVISION: 2019/10/16

Copyright (C) 2018-2019 Dmitry I. Lyakh (Liakh) Copyright (C) 2018-2019 Oak Ridge National Laboratory (UT-Battelle) Rationale: (a) A tensor inside a tensor network is generally connected to other tensors in that network via so-called tensor legs; each tensor leg is associated with a specific tensor dimension. (b) Each tensor leg specifies a connection of a given tensor dimension to some dimension (or dimensions) in another tensor (or tensors) in the same tensor network. Thus, tensor legs can be binary, ternary, etc., based on whether the tensor network is a graph or a hyper-graph. (c) The abstraction of a connected tensor is introduced for a quick inspection of the neighborhood of a chosen tensor inside the tensor network.

ExaTN::Numerics: Tensor network expansion REVISION: 2019/10/31

Copyright (C) 2018-2019 Dmitry I. Lyakh (Liakh) Copyright (C) 2018-2019 Oak Ridge National Laboratory (UT-Battelle)

ExaTN::Numerics: Tensor network expansion REVISION: 2019/10/31

Copyright (C) 2018-2019 Dmitry I. Lyakh (Liakh) Copyright (C) 2018-2019 Oak Ridge National Laboratory (UT-Battelle) Rationale: (a) A tensor network expansion is an ordered linear expansion consisting of tensor networks with complex coefficients. The output tensors of all constituting tensor networks must be congruent, that is, have the same shape and leg direction. The tensor network expansion is essentially a linear combination of tensor network vectors in a given tensor space. The rank of the tensor network expansion is the rank of the output tensors of all constituting tensor networks (they are the same). (b) A tensor network expansion can either be a ket or a bra. (c) An inner product tensor network expansion can be formed by contracting one tensor network expansion with another tensor network expansion from the dual vector space (bra*ket, ket*bra). (d) A direct product tensor network expansion can be formed from two tensor network expansions from the same space (bra*bra, ket*ket). (e) A tensor network operator can be applied to a tensor network expansion, producing another tensor network expansion in the same space.

ExaTN::Numerics: Tensor leg (connection) REVISION: 2019/10/21

Copyright (C) 2018-2019 Dmitry I. Lyakh (Liakh) Copyright (C) 2018-2019 Oak Ridge National Laboratory (UT-Battelle)

ExaTN::Numerics: Tensor leg (connection) REVISION: 2019/10/21

Copyright (C) 2018-2019 Dmitry I. Lyakh (Liakh) Copyright (C) 2018-2019 Oak Ridge National Laboratory (UT-Battelle) Rationale: (a) A tensor leg associates a tensor mode with a mode in another tensor by carrying the id of another tensor, its specific mode (position), and direction of the association.

ExaTN::Numerics: Tensor network REVISION: 2019/11/05

Copyright (C) 2018-2019 Dmitry I. Lyakh (Liakh) Copyright (C) 2018-2019 Oak Ridge National Laboratory (UT-Battelle)

ExaTN::Numerics: Tensor network REVISION: 2019/11/05

Copyright (C) 2018-2019 Dmitry I. Lyakh (Liakh) Copyright (C) 2018-2019 Oak Ridge National Laboratory (UT-Battelle) Rationale: (a) A tensor network is a set of connected tensors. Each tensor in a tensor network can be connected to other tensors in that tensor network via tensor legs. Each tensor leg in a given tensor is uniquely associated with one of its modes, one tensor leg per tensor mode. The numeration of tensor modes is contiguous and starts from 0. A tensor leg can connect a given tensor with one or more other tensors in the same tensor network. Thus, tensor legs can be binary, ternary, etc., in general (binary is common choice). (b) A tensor network is always closed, which requires introducing an explicit output tensor collecting all open legs of the original tensor network. If the original tensor network does not have open legs, the output tensor is simply a scalar which the original tensor network evaluates to; otherwise, a tensor network evaluates to a tensor. (c) Current tensor enumeration (it is just one option): 0: Output tensor/scalar which the tensor network evaluates to; 1..N: Input tensors/scalars constituting the original tensor network; N+1..M: Intermediate tensors obtained by contractions of the input tensors. In general, only the output tensor is required to have id = 0; any other tensor in the tensor network may have any unique positive id. (d) Building a tensor network: Option 1: A new tensor can be appended into a tensor network by either: (1) Explicitly matching the tensor modes with the modes of all other tensors present or to be present in the tensor network. The fully specified output tensor with all its legs has had to be provided in advance in the TensorNetwork ctor. This way requires the advance knowledge of the entire tensor network. Once all tensors have been appended, one needs to call .finalize() to complete the construction of the tensor network. 
(2) Matching the tensor modes with the modes of the current output tensor of the tensor network. In this case, the unmatched modes of the newly appended tensor will be appended to the current output tensor of the tensor network (at the end). Option 2: A tensor network can be appended to another tensor network by matching the modes of the output tensors of both tensor networks. The unmatched modes of the output tensor of the appended tensor network will be appended to the output tensor of the primary tensor network (at the end). The appended tensor network will cease to exist after being absorbed by the primary tensor network. (e) The modes of the output tensor of a tensor network can be examined and reordered. (f) Any tensor except the output tensor can be deleted from the tensor network. (g) Any two tensors, excluding the output tensor, can be merged by tensor contraction.

ExaTN::Numerics: Tensor operation: Adds a tensor to another tensor REVISION: 2019/08/30

Copyright (C) 2018-2019 Dmitry I. Lyakh (Liakh) Copyright (C) 2018-2019 Oak Ridge National Laboratory (UT-Battelle)

ExaTN::Numerics: Tensor operation: Adds a tensor to another tensor REVISION: 2019/08/30

Copyright (C) 2018-2019 Dmitry I. Lyakh (Liakh) Copyright (C) 2018-2019 Oak Ridge National Laboratory (UT-Battelle) Rationale: (a) Adds a tensor to another tensor inside the processing backend.

ExaTN::Numerics: Tensor operation: Contracts two tensors and accumulates the result into another tensor REVISION: 2019/08/30

Copyright (C) 2018-2019 Dmitry I. Lyakh (Liakh) Copyright (C) 2018-2019 Oak Ridge National Laboratory (UT-Battelle)

ExaTN::Numerics: Tensor operation: Contracts two tensors and accumulates the result into another tensor REVISION: 2019/08/30

Copyright (C) 2018-2019 Dmitry I. Lyakh (Liakh) Copyright (C) 2018-2019 Oak Ridge National Laboratory (UT-Battelle) Rationale: (a) Contracts two tensors and accumulates the result into another tensor inside the processing backend.

ExaTN::Numerics: Tensor operation: Creates a tensor REVISION: 2019/08/30

Copyright (C) 2018-2019 Dmitry I. Lyakh (Liakh) Copyright (C) 2018-2019 Oak Ridge National Laboratory (UT-Battelle)

ExaTN::Numerics: Tensor operation: Creates a tensor REVISION: 2019/08/30

Copyright (C) 2018-2019 Dmitry I. Lyakh (Liakh) Copyright (C) 2018-2019 Oak Ridge National Laboratory (UT-Battelle) Rationale: (a) Creates a tensor inside the processing backend.

ExaTN::Numerics: Tensor operation: Destroys a tensor REVISION: 2019/08/30

Copyright (C) 2018-2019 Dmitry I. Lyakh (Liakh) Copyright (C) 2018-2019 Oak Ridge National Laboratory (UT-Battelle)

ExaTN::Numerics: Tensor operation: Destroys a tensor REVISION: 2019/08/30

Copyright (C) 2018-2019 Dmitry I. Lyakh (Liakh) Copyright (C) 2018-2019 Oak Ridge National Laboratory (UT-Battelle) Rationale: (a) Destroys a tensor inside the processing backend.

ExaTN::Numerics: Tensor operation factory REVISION: 2019/09/10

Copyright (C) 2018-2019 Dmitry I. Lyakh (Liakh) Copyright (C) 2018-2019 Oak Ridge National Laboratory (UT-Battelle)

ExaTN::Numerics: Tensor operation factory REVISION: 2019/09/10

Copyright (C) 2018-2019 Dmitry I. Lyakh (Liakh) Copyright (C) 2018-2019 Oak Ridge National Laboratory (UT-Battelle) Rationale: (a) Creates new tensor operations of desired kind.

ExaTN::Numerics: Tensor operation: Transforms/initializes a tensor REVISION: 2019/08/30

Copyright (C) 2018-2019 Dmitry I. Lyakh (Liakh) Copyright (C) 2018-2019 Oak Ridge National Laboratory (UT-Battelle)

ExaTN::Numerics: Tensor operation: Transforms/initializes a tensor REVISION: 2019/09/20

Copyright (C) 2018-2019 Dmitry I. Lyakh (Liakh) Copyright (C) 2018-2019 Oak Ridge National Laboratory (UT-Battelle) Rationale: (a) Transforms/initializes a tensor inside the processing backend. Requires a user-provided talsh::TensorFunctor object to concretize the transformation/initialization operation.

ExaTN::Numerics: Tensor operation REVISION: 2019/10/13

Copyright (C) 2018-2019 Dmitry I. Lyakh (Liakh) Copyright (C) 2018-2019 Oak Ridge National Laboratory (UT-Battelle)

ExaTN::Numerics: Tensor operation REVISION: 2019/10/13

Copyright (C) 2018-2019 Dmitry I. Lyakh (Liakh) Copyright (C) 2018-2019 Oak Ridge National Laboratory (UT-Battelle) Rationale: (a) A tensor operation is a formal numerical operation on one or more tensors.

ExaTN::Numerics: Tensor operator REVISION: 2019/10/31

Copyright (C) 2018-2019 Dmitry I. Lyakh (Liakh) Copyright (C) 2018-2019 Oak Ridge National Laboratory (UT-Battelle)

ExaTN::Numerics: Tensor operator REVISION: 2019/10/31

Copyright (C) 2018-2019 Dmitry I. Lyakh (Liakh) Copyright (C) 2018-2019 Oak Ridge National Laboratory (UT-Battelle) Rationale: (a) A tensor network vector is a vector in a given tensor space with its expansion (tensor) coefficients factorized as a tensor network. A ket tensor network vector produces its corresponding dual bra tensor network vector upon complex conjugation of all constituting tensor factors and reversing the direction of the all tensor legs. (b) A tensor operator is an ordered linear combination of tensors and tensor networks in which the output tensor legs are distinguished as bra and ket tensor legs: The bra tensor legs contract with legs of a bra tensor network vector, the ket tensor legs contract with legs of a ket tensor network vector. (c) The first component of the tensor operator is applied first when acting on a ket vector. The last component of the tensor operator is applied first when acting on a bra vector. (d) The order of components of a tensor operator is reversed upon conjugation.

ExaTN::Numerics: Tensor shape REVISION: 2019/10/21

Copyright (C) 2018-2019 Dmitry I. Lyakh (Liakh) Copyright (C) 2018-2019 Oak Ridge National Laboratory (UT-Battelle)

ExaTN::Numerics: Tensor shape REVISION: 2019/10/21

Copyright (C) 2018-2019 Dmitry I. Lyakh (Liakh) Copyright (C) 2018-2019 Oak Ridge National Laboratory (UT-Battelle) Rationale: (a) Tensor shape is an ordered set of tensor dimension extents. A scalar tensor (rank-0 tensor) has an empty shape.

ExaTN::Numerics: Tensor signature REVISION: 2019/10/21

Copyright (C) 2018-2019 Dmitry I. Lyakh (Liakh) Copyright (C) 2018-2019 Oak Ridge National Laboratory (UT-Battelle)

ExaTN::Numerics: Tensor signature REVISION: 2019/10/21

Copyright (C) 2018-2019 Dmitry I. Lyakh (Liakh) Copyright (C) 2018-2019 Oak Ridge National Laboratory (UT-Battelle) Rationale: (a) Tensor signature is an ordered set of tensor dimension specifiers, that is, specifiers of the subspaces tensor dimensions are spanned over; (b) Registered signature: Tensor dimension specifier consists of a Space Id and a Subspace Id, thus associating the tensor dimension with a specific registered subspace of a specific registered vector space. (c) Anonymous signature: Tensor dimension specifier consists of the Space Id = SOME_SPACE, while the Subspace Id specifies the offset (first basis vector) in SOME_SPACE.

ExaTN: Numerics: Symbolic tensor processing REVISION: 2019/09/12

Copyright (C) 2018-2019 Dmitry I. Lyakh (Liakh) Copyright (C) 2018-2019 Oak Ridge National Laboratory (UT-Battelle)

ExaTN: Numerics: Symbolic tensor processing REVISION: 2019/09/11

Copyright (C) 2018-2019 Dmitry I. Lyakh (Liakh) Copyright (C) 2018-2019 Oak Ridge National Laboratory (UT-Battelle)

Rationale: (a) Valid symbolic tensor formats are (angle brackets mark placeholders for tokens): (1) Scalar (rank-0 tensor): <TensorName><+>() Examples: S(), T+() (2) Tensor in an orthogonal basis: <TensorName><+>(<label>,<label>,...) Examples: Q(i1,a1), H+(p1,p2,q1,q2) (3) <TensorName><+>(<label>,<label>,...|<label>,<label>,...) Examples: Q(i1|a1), H+(p1,p2|q1,q2), R12(|a1,i3), L21(i1,b2|) where <TensorName> is an alphanumeric_ tensor name beginning with a letter; <+> is an optional complex conjugation sign; <label> is an alphanumeric_ index label beginning with a letter; In case the "|" separator is absent, all tensor indices are considered invariant, corresponding to undirected tensor legs (orthogonal basis). In case the "|" separator is present, tensor indices prior to "|" are considered contravariant, corresponding to OUTWARD tensor legs, whereas tensor indices after "|" are considered covariant, corresponding to INWARD tensor legs (this distinction becomes essential in non-orthogonal bases). (b) Valid symbolic tensor network formats: (a) <OutputTensor> = <InputTensor> * <InputTensor> * ... * <InputTensor> (b) <OutputTensor> += <InputTensor> * <InputTensor> * ... * <InputTensor> The number of tensors on the right-hand side is one or more.

ExaTN:: Tensor Runtime: Tensor graph executor: Eager REVISION: 2019/10/16

Copyright (C) 2018-2019 Tiffany Mintz, Dmitry Lyakh, Alex McCaskey Copyright (C) 2018-2019 Oak Ridge National Laboratory (UT-Battelle)

ExaTN:: Tensor Runtime: Tensor graph executor: Eager REVISION: 2019/10/04

Copyright (C) 2018-2019 Tiffany Mintz, Dmitry Lyakh, Alex McCaskey Copyright (C) 2018-2019 Oak Ridge National Laboratory (UT-Battelle)

Rationale:

ExaTN:: Tensor Runtime: Tensor graph executor: Lazy REVISION: 2019/10/04

Copyright (C) 2018-2019 Dmitry Lyakh, Tiffany Mintz, Alex McCaskey Copyright (C) 2018-2019 Oak Ridge National Laboratory (UT-Battelle)

Rationale:

ExaTN:: Tensor Runtime: Tensor graph node executor: Exatensor REVISION: 2019/10/04

Copyright (C) 2018-2019 Dmitry Lyakh, Tiffany Mintz, Alex McCaskey Copyright (C) 2018-2019 Oak Ridge National Laboratory (UT-Battelle)

ExaTN:: Tensor Runtime: Tensor graph node executor: Exatensor REVISION: 2019/10/04

Copyright (C) 2018-2019 Dmitry Lyakh, Tiffany Mintz, Alex McCaskey Copyright (C) 2018-2019 Oak Ridge National Laboratory (UT-Battelle)

Rationale:

ExaTN:: Tensor Runtime: Tensor graph node executor: Talsh REVISION: 2019/10/07

Copyright (C) 2018-2019 Dmitry Lyakh, Tiffany Mintz, Alex McCaskey Copyright (C) 2018-2019 Oak Ridge National Laboratory (UT-Battelle)

ExaTN:: Tensor Runtime: Tensor graph node executor: Talsh REVISION: 2019/10/07

Copyright (C) 2018-2019 Dmitry Lyakh, Tiffany Mintz, Alex McCaskey Copyright (C) 2018-2019 Oak Ridge National Laboratory (UT-Battelle)

Rationale:

ExaTN:: Tensor Runtime: Tensor graph executor REVISION: 2019/10/13

Copyright (C) 2018-2019 Dmitry Lyakh, Tiffany Mintz, Alex McCaskey Copyright (C) 2018-2019 Oak Ridge National Laboratory (UT-Battelle)

Rationale: (a) Tensor graph executor traverses the tensor graph (DAG) and executes all its nodes while respecting node dependencies. Each DAG node is executed by a concrete tensor node executor (tensor operation stored in the DAG node accepts a polymorphic tensor node executor which then executes that tensor operation). The execution of each DAG node is generally asynchronous.

ExaTN:: Tensor Runtime: Tensor graph execution state REVISION: 2019/10/16

Copyright (C) 2018-2019 Dmitry Lyakh, Tiffany Mintz, Alex McCaskey Copyright (C) 2018-2019 Oak Ridge National Laboratory (UT-Battelle)

ExaTN:: Tensor Runtime: Tensor graph execution state REVISION: 2019/10/16

Copyright (C) 2018-2019 Dmitry Lyakh, Tiffany Mintz, Alex McCaskey Copyright (C) 2018-2019 Oak Ridge National Laboratory (UT-Battelle)

Rationale: (a) Tensor graph is a directed acyclic graph in which vertices represent tensor operations and directed edges represent dependencies between them: A directed edge from node1 to node2 indicates that node1 depends on node2. Each DAG node has its unique integer vertex id (VertexIdType) returned when the node is appended into the DAG. (b) The tensor graph contains: 1. The DAG implementation (DirectedBoostGraph subclass); 2. The DAG execution state (TensorExecState data member). (c) The execution state of each Tensor in the DAG is either of the following: 1. None (no outstanding reads or writes on the Tensor); 2. Read (one or more most recently submitted tensor operations involving the Tensor perform reads on it). This is the READ epoch characterized by a positive integer equal to the number of outstanding reads on the Tensor in the current (read) epoch. 3. Write (most recent tensor operation on the Tensor is a write). This is the WRITE epoch characterized by a negative integer -1 denoting the single outstanding write on the Tensor in the current (write) epoch. The execution state of a Tensor is progressing through alternating read and write epochs, introducing read-after-write, write-after-write, and write-after-read dependencies between tensor nodes with stored tensor operations operating on the same data (Tensor). Importantly, the execution state of a Tensor is defined with respect to the DAG builder, that is, every time a new tensor operation is added into the DAG the execution state of each participating tensor is inspected and possibly altered (switched to another epoch). Thus, the execution state of a tensor is only used for establishing data dependencies for newly added DAG nodes, it has nothing to do with actual DAG execution.

ExaTN:: Tensor Runtime: Directed acyclic graph (DAG) of tensor operations REVISION: 2019/10/29

Copyright (C) 2018-2019 Tiffany Mintz, Dmitry Lyakh, Alex McCaskey Copyright (C) 2018-2019 Oak Ridge National Laboratory (UT-Battelle)

Rationale: (a) The execution space consists of one or more DAGs in which nodes represent tensor operations (tasks) and directed edges represent dependencies between the corresponding nodes (tensor operations). Each DAG is associated with a uniquely named TAProL scope such that all tensor operations submitted by the Client to the ExaTN numerical server are forwarded into the DAG associated with the TAProL scope in which the Client currently resides. (b) The tensor graph contains: 1. The DAG implementation (in the DirectedBoostGraph subclass); 2. The DAG execution state (TensorExecState data member). (c) DEVELOPERS ONLY: The TensorGraph object provides lock/unlock methods for concurrent update of the DAG structure (by Client thread) and its execution state (by Execution thread). Public virtual methods of TensorGraph implemented in the DirectedBoostGraph subclass perform locking/unlocking from there. Other (non-virtual) public methods of TensorGraph perform locking/unlocking from here. Additionally, each node of the TensorGraph (TensorOpNode object) provides a finer-grained locking mechanism (lock/unlock methods) for providing exclusive access to individual DAG nodes, which is only relevant to the TensorOpNode.getOperation() method since it returns a reference to the stored tensor operation (shared pointer reference), and thus may require external locking for securing an exclusive access to this data member of TensorOpNode.

ExaTN:: Tensor Runtime: Task-based execution layer for tensor operations REVISION: 2019/10/20

Copyright (C) 2018-2019 Tiffany Mintz, Dmitry Lyakh, Alex McCaskey Copyright (C) 2018-2019 Oak Ridge National Laboratory (UT-Battelle)

ExaTN:: Tensor Runtime: Task-based execution layer for tensor operations REVISION: 2019/10/29

Copyright (C) 2018-2019 Tiffany Mintz, Dmitry Lyakh, Alex McCaskey Copyright (C) 2018-2019 Oak Ridge National Laboratory (UT-Battelle)

Rationale: (a) The execution space consists of one or more DAGs in which nodes represent tensor operations (tasks) and directed edges represent dependencies between the corresponding nodes (tensor operations). Each DAG is associated with a uniquely named TAProL scope such that all tensor operations submitted by the Client to the ExaTN numerical server are forwarded into the DAG associated with the TAProL scope in which the Client currently resides. (b) The DAG lifecycle: openScope(name): Opens a new TAProL scope and creates its associated empty DAG. The .submit method can then be used to append new tensor operations or whole tensor networks into the current DAG. The actual execution of the submitted tensor operations is asynchronous and may start any time after submission. pauseScope(): Completes the actual execution of all started tensor operations in the current DAG and defers the execution of the rest of the DAG for later. resumeScope(name): Pauses the execution of the currently active DAG (if any) and resumes the execution of a previously paused DAG, making it current. closeScope(): Completes all tensor operations in the current DAG and destroys it. (c) submit(TensorOperation): Submits a tensor operation for (generally deferred) execution. sync(TensorOperation): Tests for completion of a specific tensor operation. sync(tensor): Tests for completion of all submitted update operations on a given tensor. (d) Upon creation, the TensorRuntime object spawns an execution thread which will be executing tensor operations in the course of DAG traversal. The execution thread will be joined upon TensorRuntime destruction. After spawning the execution thread, the main thread returns control to the client which will then be able to submit new operations into the current DAG. The submitted operations will be autonomously executed by the execution thread. 
The DAG execution policy is specified by a polymorphic TensorGraphExecutor provided during the construction of the TensorRuntime. Correspondingly, the TensorGraphExecutor contains a polymorphic TensorNodeExecutor responsible for the actual execution of submitted tensor operations via an associated computational backend. The concrete TensorNodeExecutor is specified during the construction of the TensorRuntime object. (e) DEVELOPERS ONLY: The TensorGraph object (DAG) provides lock/unlock methods for concurrent update of the DAG structure (by Client thread) and its execution state (by Execution thread). Additionally, each node of the TensorGraph (TensorOpNode object) provides a finer-grained locking mechanism (lock/unlock methods) for providing exclusive access to individual DAG nodes.

Function Documentation

◆ addTensors()

template<typename NumericType >
bool exatn::addTensors ( const std::string &  addition,
NumericType  alpha 
)
inline

Performs tensor addition: tensor0 += tensor1 * alpha

◆ closeScope()

ScopeId exatn::closeScope ( )
inline

Closes the currently open TAProL scope and returns its parental scope id.

◆ contractTensors()

template<typename NumericType >
bool exatn::contractTensors ( const std::string &  contraction,
NumericType  alpha 
)
inline

Performs tensor contraction: tensor0 += tensor1 * tensor2 * alpha

◆ createSubspace()

SubspaceId exatn::createSubspace ( const std::string &  subspace_name,
const std::string &  space_name,
const std::pair< DimOffset, DimOffset >  bounds,
const Subspace **  subspace_ptr = nullptr 
)
inline

Creates a named subspace of a named vector space, returns its registered id, and, optionally, a non-owning pointer to it.

◆ createTensor()

template<typename... Args>
bool exatn::createTensor ( const std::string &  name,
TensorElementType  element_type,
Args &&...  args 
)
inline

Declares, registers and actually creates a tensor via processing backend. See numerics::Tensor constructors for different creation options.

◆ createVectorSpace()

SpaceId exatn::createVectorSpace ( const std::string &  space_name,
DimExtent  space_dim,
const VectorSpace **  space_ptr = nullptr 
)
inline

Creates a named vector space, returns its registered id, and, optionally, a non-owning pointer to it.

◆ destroySubspace()

void exatn::destroySubspace ( const std::string &  subspace_name)
inline

Destroys a previously created named subspace of a named vector space.

◆ destroyTensor()

bool exatn::destroyTensor ( const std::string &  name)
inline

Destroys a tensor, including its backend representation.

◆ destroyVectorSpace()

void exatn::destroyVectorSpace ( const std::string &  space_name)
inline

Destroys a previously created named vector space.

◆ evaluateTensorNetwork()

bool exatn::evaluateTensorNetwork ( const std::string &  name,
const std::string &  network 
)
inline

Performs a full evaluation of a tensor network.

◆ finalize()

void exatn::finalize ( )

Finalizes ExaTN

◆ generate_contraction_pattern()

bool exatn::generate_contraction_pattern ( const std::vector< numerics::TensorLeg > &  pattern,
unsigned int  left_tensor_rank,
unsigned int  right_tensor_rank,
std::string &  symb_pattern 
)

Generates symbolic tensor contraction pattern from the digital tensor contraction pattern used by the contraction-based Tensor constructor: pattern[0..m-1] describes connectivity of dimensions of the left contracted tensor, pattern[m..m+n-1] describes connectivity of dimensions of the right contracted tensor, where m and n are the ranks of the left and right contracted tensors, respectively. pattern[x] is a TensorLeg specifying the dimension of another tensor the described dimension is connected to, where the result tensor is tensor 0 while the left and right contracted tensors are tensors 1 and 2, respectively.

◆ getExternalData()

std::shared_ptr<BytePacket> exatn::getExternalData ( const std::string &  tag)
inline

Retrieves a registered external data packet.

◆ getLocalTensor()

std::shared_ptr<talsh::Tensor> exatn::getLocalTensor ( std::shared_ptr< Tensor >  tensor,
const std::vector< std::pair< DimOffset, DimExtent >> &  slice_spec 
)
inline

Returns a locally stored tensor slice (talsh::Tensor) providing access to tensor elements. This slice will be extracted from the exatn::numerics::Tensor implementation as a copy. The returned future becomes ready once the execution thread has retrieved the slice copy.

◆ getSubspace()

const Subspace* exatn::getSubspace ( const std::string &  subspace_name)
inline

Returns a non-owning pointer to a previously registered named subspace of a previously registered named vector space.

◆ getTensorElementType()

TensorElementType exatn::getTensorElementType ( const std::string &  name)
inline

Returns the tensor element type.

◆ getTensorMethod()

std::shared_ptr<TensorMethod> exatn::getTensorMethod ( const std::string &  tag)
inline

Retrieves a registered external tensor method.

◆ getTensorRef()

Tensor& exatn::getTensorRef ( const std::string &  name)
inline

Returns the reference to the actual tensor object.

◆ initialize()

void exatn::initialize ( )

Initializes ExaTN

◆ initTensor()

template<typename NumericType >
bool exatn::initTensor ( const std::string &  name,
NumericType  value 
)
inline

Initializes a tensor to some scalar value.

◆ is_alphanumeric()

bool exatn::is_alphanumeric ( const std::string &  identifier)
inline

Returns TRUE if the symbolic identifier consists only of alphanumeric characters and underscores ('_') and starts with a letter.

◆ isInitialized()

bool exatn::isInitialized ( )

Returns whether or not ExaTN has been initialized

◆ openScope()

ScopeId exatn::openScope ( const std::string &  scope_name)
inline

Opens a new (child) TAProL scope and returns its id.

◆ parse_tensor()

bool exatn::parse_tensor ( const std::string &  tensor,
std::string &  tensor_name,
std::vector< IndexLabel > &  indices,
bool &  complex_conjugated 
)

Returns TRUE if the tensor parses as a valid symbolic tensor. The output function parameters will contain parsed tokens.

◆ parse_tensor_network()

bool exatn::parse_tensor_network ( const std::string &  network,
std::vector< std::string > &  tensors 
)

Returns TRUE if the tensor network parses as a valid symbolic tensor network. The output std::vector returns parsed symbolic tensors where element #0 is the output tensor of the tensor network.

◆ registerExternalData()

void exatn::registerExternalData ( const std::string &  tag,
std::shared_ptr< BytePacket >  packet 
)
inline

Registers an external data packet.

◆ registerTensorMethod()

void exatn::registerTensorMethod ( const std::string &  tag,
std::shared_ptr< TensorMethod >  method 
)
inline

Registers an external tensor method.

◆ resetRuntimeLoggingLevel()

void exatn::resetRuntimeLoggingLevel ( int  level = 0)
inline

Resets tensor runtime logging level (0:none).

◆ sync()

bool exatn::sync ( const std::string &  name,
bool  wait = true 
)
inline

Synchronizes all outstanding update operations on a given tensor.

◆ transformTensor()

bool exatn::transformTensor ( const std::string &  name,
std::shared_ptr< TensorMethod >  functor 
)
inline

Transforms (updates) a tensor according to a user-defined tensor functor.

◆ trim_spaces_off()

std::pair<int,int> exatn::trim_spaces_off ( const std::string &  str,
std::pair< int, int >  view 
)
inline

Returns the string view range without leading and trailing spaces.

Variable Documentation

◆ numericalServer

std::shared_ptr< NumServer > exatn::numericalServer {nullptr}

Numerical server (singleton)

Numerical service singleton (numerical server)