Linear algebra with symmetric tensors#

Basic algebra operations#

    def test_syntax_basic_algebra(self):
        legs = [yastn.Leg(config_U1, s=-1, t=(-1, 0, 1), D=(1, 2, 3)),
                yastn.Leg(config_U1, s=1, t=(-1, 1, 2), D=(4, 5, 6)),
                yastn.Leg(config_U1, s=1, t=(-1, 1, 2), D=(7, 8, 9)),
                yastn.Leg(config_U1, s=-1, t=(-1, 1, 2), D=(10, 11, 12))]

        a = yastn.rand(config=config_U1, legs=legs)

        #
        # Tensor can be multiplied by scalar
        #
        tensor = a / 2
        tensor = 2. * a
        tensor = a * 2.

        #
        # Tensors can be added or subtracted assuming their structure is
        # compatible.
        #
        b = yastn.ones(config=config_U1, legs=legs)
        tensor = a + b
        tensor = a - b

        #
        # Attempting to add/subtract two tensors with different total charge,
        # or different dimension of a common charge sector raises exception
        #
        legs[0] = yastn.Leg(config_U1, s=-1, t=(-1, 0, 1), D=(7, 2, 3))
        c = yastn.ones(config=config_U1, legs=legs)
        with self.assertRaises(Exception):
            tensor = a + c

        #
        # element-wise exponentiation, absolute value, reciprocal i.e. x -> 1/x,
        # square root and its reciprocal x -> 1/sqrt(x)
        #
        tensor = a.exp(step=1)
        tensor = yastn.exp(a, step=1)

        tensor = abs(a)

        tensor = a.reciprocal(cutoff=1e-12)
        tensor = yastn.reciprocal(a, cutoff=1e-12)

        tensor = abs(a).sqrt()
        tensor = yastn.sqrt(abs(a))

        tensor = abs(a).rsqrt(cutoff=1e-12)
        tensor = yastn.rsqrt(abs(a), cutoff=1e-12)


        #
        # Sometimes a composite operation is faster than the serial execution of
        # individual operations. For example, multiplication by a scalar followed
        # by addition, a + x*b, is handled by a specialized function
        #
        tensor = a.apxb(b, x=1)
        tensor = yastn.apxb(a, b, x=1)
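
        #
        # As a quick consistency check (assuming a tolerance tol, as used in the
        # other examples): apxb(a, b, x) should agree with a + x*b
        assert yastn.norm(yastn.apxb(a, b, x=2.0) - (a + 2.0 * b)) < tol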

Tensor contractions#

Basic contractions with yastn.tensordot(), the matrix-multiplication operator @, and tracing with yastn.trace()

    def test_syntax_contraction(self):
        # create a set of U1-symmetric tensors
        leg1 = yastn.Leg(config_U1, s=-1, t=(-1, 0, 1), D=(1, 2, 3))
        leg2 = yastn.Leg(config_U1, s=1, t=(-1, 1, 2), D=(4, 5, 6))
        leg3 = yastn.Leg(config_U1, s=1, t=(-1, 1, 2), D=(7, 8, 9))
        leg4 = yastn.Leg(config_U1, s=-1, t=(-1, 1, 2), D=(10, 11, 12))

        a = yastn.rand(config=config_U1, legs=[leg1, leg2, leg3, leg4])
        b = yastn.ones(config=config_U1, legs=[leg1, leg2, leg3, leg4])
        c = yastn.rand(config=config_U1, legs=[leg4.conj(), leg3, leg2.conj()])

        # Contract a and b over two indices. Tensor a is conjugated, which
        # reverses the signature of its indices
        #       __           _                ___
        #  0->-|a*|->-1 1->-|b|->-0 =    0->-|a*b|->-0->2
        #  3->-|__|->-2 2->-|_|->-3   1<-3->-|___|->-3
        #
        # The indices of the resulting tensor are ordered as follows:
        # first the uncontracted indices of a (the first argument to tensordot),
        # then the uncontracted indices of b
        tensor = yastn.tensordot(a, b, axes=((1, 2), (1, 2)), conj=(1, 0))

        # tensordot can also be invoked as a method of the tensor itself
        #
        tensor = a.tensordot(b, axes=((1, 2), (1, 2)), conj=(1, 0))

        # If no axes are specified, the outer product of two tensors is returned
        tensor = yastn.tensordot(c, c, axes=((), ()))
        assert tensor.get_rank() == 6
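        #
        # Consistency check (an assumed property of the outer product): each
        # operand keeps its legs, so the dense shape of the result is the
        # concatenation of both operands' shapes
        assert tensor.get_shape() == c.get_shape() + c.get_shape()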


        # A shorthand notation for the specific contraction
        #      _           _             __
        # 0-<-|a|-<-2     |c|-<-1 = 0-<-|ac|-<-2
        # 1->-|_|->-3 0->-|_|->-2   1->-|  |-<-1->3
        #                               |__|->-2->4
        t0 = yastn.tensordot(a, c, axes=(a.ndim - 1, 0))
        #
        # is the @ operator. For rank-2 tensors it is thus equivalent to matrix multiplication
        t1 = a @ c
        assert yastn.norm(t0 - t1) < tol
        #
        # Utility functions that simplify specifying and executing contractions
        t2 = yastn.ncon([a, c], ((-0, -1, -2, 1), (1, -3, -4)))
        t3 = yastn.einsum('ijkx,xlm->ijklm', a, c)
        assert yastn.norm(t0 - t2) < tol
        assert yastn.norm(t0 - t3) < tol


        # Another special case of tensor contraction is a dot product of vectorized tensors
        #  __           _
        # |a*|-<-0 0-<-|b| = scalar
        # |  |->-1 1->-| |
        # |  |->-2 2->-| |
        # |__|-<-3 3-<-|_|
        tensor = a.tensordot(b, axes=((0, 1, 2, 3), (0, 1, 2, 3)), conj=(1, 0))
        assert isinstance(tensor, yastn.Tensor)
        #
        # Such a single-element symmetric Tensor can be converted to a single-element
        # tensor of the backend type, or even further to a Python scalar
        number = tensor.to_number()
        python_scalar = tensor.item()
        assert isinstance(python_scalar, float)

        # A shorthand function for computing dot products is vdot
        number = yastn.vdot(a, b)
        number = a.vdot(b)
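        #
        # Consistency check (assuming vdot conjugates its first argument, matching
        # the explicit contraction above): it should equal the scalar computed there
        assert abs(number - python_scalar) < tol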

        # A trace over selected pairs of indices can be computed using the identically
        # named function. In this case, tensor_jl = sum_i a2_ijil
        a2 = yastn.tensordot(a, a, axes=((0, 1), (0, 1)), conj=(1, 0))
        tensor = a2.trace(axes=(0, 2))
        assert tensor.get_rank() == 2
        #
        # More pairs of indices can be traced at once: sum_ij a2_ijij = scalar
        tensor = a2.trace(axes=((0, 1), (2, 3)))
        number = tensor.to_number()
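        #
        # Sanity check: the full trace of a2 sums |a_ijkl|^2 over all elements,
        # reproducing the squared Frobenius norm of a
        assert abs(number - a.norm()**2) < tol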

The higher-level interface ncon (or, equivalently, einsum) composes simple contractions into a single call

def test_ncon_einsum_syntax():
    # create a set of U1-symmetric tensors
    a = yastn.rand(config=config_U1, s=[-1, 1, -1], n=0,
                  D=((20, 10), (3, 3), (1, 1)), t=((1, 0), (1, 0), (1, 0)))
    b = yastn.rand(config=config_U1, s=[1, 1, 1], n=1,
                  D=((4, 4), (2, 2), (20, 10)), t=((1, 0), (1, 0), (1, 0)))
    c = yastn.rand(config=config_U1, s=[1, 1, 1, -1], n=1,
                  D=((20, 10), (30, 20), (10, 5), (10, 5)), t=((1, 0), (1, 0), (1, 0), (1, 0)))
    d = yastn.rand(config=config_U1, s=[1, 1, -1, -1], n=0,
                  D=((30, 20), (10, 5), (20, 10), (10, 5)), t=((1, 0), (1, 0), (1, 0), (1, 0)))

    # Perform basic contraction of two tensors - equivalent to a single tensordot call
    #           _                 _                        _    _
    #  (-1) 1--|a|--0 (1) (1) 2--|b|--0 (-0) = (-1)    1--|a|--|b|--0    (-0)
    #  (-3) 2--|_|               |_|--1 (-2)   (-3) 3<-2--|_|  |_|--1->2 (-2)
    #
    # The uncontracted indices, labeled by negative integers, appear on the
    # resulting tensor ordered by their labels in descending fashion (-0, -1, -2, ...)
    e = yastn.ncon([a, b], [[1, -1, -3], [-0, -2, 1]])
    assert e.get_shape() == (8, 6, 4, 2)

    # The same can be obtained using the einsum function, which mimics the syntax of np.einsum
    e1 = yastn.einsum('xbd,acx->abcd', a, b)
    assert yastn.norm(e1 - e) < tol

    # A network composed of several tensors can be contracted with a single ncon call,
    # including traces and conjugations
    #           _                 _                       _    _    __    __
    #  (-2) 1--|a|--0 (4) (4) 0--|c|--2 (1) = (-2) 2<-1--|a|--|c|--|d*|--|b*|--0->3 (-3)
    #  (-0) 2--|_|               |_|--3 (1)   (-0) 0<-2--|_|  |_|  |__|  |__|--1->1 (-1)
    #                             |
    #                             1 (3)
    #                             0 (3)
    #           __                |_
    #  (-3) 0--|b*|--2(5) (5) 2--|d*|--1 (2)
    #  (-1) 1--|__|              |__|--3 (2)
    #
    f = yastn.ncon([a, b, c, d], [[4, -2, -0], [-3, -1, 5], [4, 3, 1, 1], [3, 2, 5, 2]],
                  conjs=(0, 1, 0, 1))
    assert f.get_shape() == (2, 4, 6, 8)

    # In yastn.einsum(subscripts, *operands, order='Alphabetic'):
    #
    # order specifies the order of contraction; otherwise, the alphabetic order
    # of the contracted indices is used. The character '*' in a subscript
    # conjugates the respective tensor. Spaces in subscripts are ignored.
    f1 = yastn.einsum('nCA, *DBo, nmkk, *mlol -> ABCD', a, b, c, d, order='klmno')
    assert yastn.norm(f1 - f) < tol
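
As a cross-check of the first ncon call above, the same contraction can be written as a single tensordot followed by a transpose. The sketch below reuses the tensors a and b, the result e, and the tolerance tol from the listing above.

    e2 = yastn.tensordot(b, a, axes=(2, 0)).transpose(axes=(0, 2, 1, 3))
    assert yastn.norm(e2 - e) < tol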

Transposition#

    def test_transpose_syntax(self):
        #
        # define rank-6 U1-symmetric tensor
        #
        a = yastn.ones(config=config_U1, s=(-1, -1, -1, 1, 1, 1),
                  t=[(0, 1), (0, 1), (0, 1), (0, 1), (0, 1), (0, 1)],
                  D=[(2, 3), (4, 5), (6, 7), (6, 5), (4, 3), (2, 1)])

        #
        # For each leg, its dense dimension is given by the sum of the dimensions
        # of its individual charge sectors. Hence, the (dense) shape of this tensor
        # is (2+3, 4+5, 6+7, 6+5, 4+3, 2+1)
        assert a.get_shape() == (5, 9, 13, 11, 7, 3)

        #
        # permute the legs of the tensor and check the shape is changed
        # accordingly
        b = a.transpose(axes=(0, 2, 4, 1, 3, 5))
        assert b.get_shape() == (5, 13, 7, 9, 11, 3)

        #
        #  if axes is not provided, the order of the legs is reversed.
        #  This can also be done using the shorthand a.T
        assert a.transpose().get_shape() == (3, 7, 11, 13, 9, 5)
        assert a.T.get_shape() == (3, 7, 11, 13, 9, 5)

        #
        # Sometimes, instead of writing an explicit permutation of all legs,
        # it is more convenient to specify only the legs to be moved (and where).
        # In this example, we reverse the permutation done previously, thus
        # ending up with a tensor numerically identical to a.
        #
        c = b.move_leg(source=(1, 2), destination=(2, 4))
        assert c.get_shape() == a.get_shape()
        assert yastn.norm(a - c) < tol
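
        #
        # Sanity check: applying the inverse permutation (0, 3, 1, 4, 2, 5) to b
        # recovers the original leg order of a
        assert yastn.norm(a - b.transpose(axes=(0, 3, 1, 4, 2, 5))) < tol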

Fusion (reshaping)#

The following example showcases fusion, in particular its hard mode, in which the tensor data is reshuffled/resized in memory.

    def test_fuse_hard(self):
        # define a rank-5 U1-symmetric tensor
        a = yastn.rand(config=config_U1, s=(-1, 1, 1, -1, 1,),
                      t=((0, 1), (0, 1), (0, 1), (0, 1), (0, 1)),
                      D=((1, 2), (3, 4), (5, 6), (7, 8), (9, 10)))

        # this tensor has 10 non-zero blocks, the largest one being
        # of shape (2, 4, 6, 8, 9) with a total of 3456 elements
        a.print_blocks_shape()
        #
        # charges         shape
        # (0, 0, 0, 0, 0) (1, 3, 5, 7, 9)
        # (0, 0, 0, 1, 1) (1, 3, 5, 8, 10)
        # (0, 0, 1, 1, 0) (1, 3, 6, 8, 9)
        # (0, 1, 0, 1, 0) (1, 4, 5, 8, 9)
        # (1, 0, 0, 0, 1) (2, 3, 5, 7, 10)
        # (1, 0, 1, 0, 0) (2, 3, 6, 7, 9)
        # (1, 0, 1, 1, 1) (2, 3, 6, 8, 10)
        # (1, 1, 0, 0, 0) (2, 4, 5, 7, 9)
        # (1, 1, 0, 1, 1) (2, 4, 5, 8, 10)
        # (1, 1, 1, 1, 0) (2, 4, 6, 8, 9)

        # Let's fuse the last three legs of tensor a into a single new leg
        b = a.fuse_legs(axes=(0, 1, (2, 3, 4)), mode='hard')

        # resulting tensor has just four non-zero blocks, with the largest
        # one holding 9176 elements
        b.print_blocks_shape()
        #
        # (0, 0, 0) (1, 3, 1147)
        # (0, 1, -1) (1, 4, 360)
        # (1, 0, 1) (2, 3, 1208)
        # (1, 1, 0) (2, 4, 1147)

        # We can also fuse more than a single group of indices.
        # Their order can be permuted as well
        c0 = a.fuse_legs(axes=(0, (3, 4), (2, 1)), mode='hard')

        # Fusion can be applied successively, fusing already-fused spaces together.
        # This results in a rank-2 tensor, equivalent to a block-sparse matrix
        c1 = c0.fuse_legs(axes=((0, 1), 2), mode='hard')

        # The resulting matrix has three blocks; the largest block holds 13604 elements
        assert c1.s == (-1, 1)
        c1.print_blocks_shape()
        #
        # (0, 0) (283, 15)
        # (1, 1) (358, 38)
        # (2, 2) (144, 24)

        # The fusion can be simply reverted, proceeding step-by-step in the reverse
        # order of the applied fusions.
        # NOTE: Unfusing an index *does not* permute the resulting indices back
        #       into their original order
        #
        # fusion step 1: 0 1 2 3 4 -> permute -> 0 3 4 2 1 -> fuse -> 0 (3 4) (2 1) = 0 1 2
        # fusion step 2: 0 1 2 -> fuse -> (0 1) 2 = 0 1
        #
        # unfuse step 2: 0 1 -> unfuse -> (0 1) 1->2 = 0 1 2
        c0_0 = c1.unfuse_legs(axes=0)
        assert yastn.norm(c0_0 - c0) < tol

        # unfuse step 1: 0 1 2 -> unfuse -> 0 (3->1 4->2) (2->3 1->4) = 0 1 2 3 4
        #
        # Hence, to retrieve the original tensor, we have to permute its indices
        a_0 = c0_0.unfuse_legs(axes=(1, 2))
        a_0 = a_0.transpose(axes=(0, 4, 3, 1, 2))
        assert yastn.norm(a - a_0) < tol
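
Hard fusion is compatible with contractions: contracting over a hard-fused leg should give the same result as contracting over the corresponding unfused legs, provided the fusion structures of the two legs match. A minimal sketch, reusing a, b, and tol from the listing above:

        t_unfused = yastn.tensordot(a, a, axes=((2, 3, 4), (2, 3, 4)), conj=(1, 0))
        t_fused = yastn.tensordot(b, b, axes=(2, 2), conj=(1, 0))
        assert yastn.norm(t_unfused - t_fused) < tol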

Conjugation of symmetric tensors#

    def test_conj_Z2xU1(self):
        #
        # create random complex-valued symmetric tensor with symmetry Z2 x U1
        #
        legs = [yastn.Leg(config_Z2xU1, s=1, t=((0, 2), (1, 1), (1, 2)), D=(1, 2, 3)),
                yastn.Leg(config_Z2xU1, s=-1, t=((0, 0), (0, -1), (1, 0)), D=(4, 5, 6))]
        a = yastn.rand(config=config_Z2xU1, legs=legs, n=(1, 2), dtype="complex128")
        #
        # conjugate tensor a: verify that the signature and total charge
        # have been reversed.
        #
        b = a.conj()
        assert b.get_tensor_charge() == (1, -2)
        assert b.get_signature() == (-1, 1)

        #
        # Interpreting these tensors a, b as vectors, the following contraction
        #  _           _
        # | |-<-0 0-<-| |
        # |a|->-1 1->-|b|
        #
        # is equivalent to computing the square of the Frobenius norm of a.
        # The result is a chargeless, single-element tensor equivalent to a scalar.
        #
        norm_F = yastn.tensordot(a, b, axes=((0, 1), (0, 1)))
        assert norm_F.get_tensor_charge() == (0, 0)
        assert norm_F.get_signature() == ()
        assert abs(a.norm()**2 - norm_F.real().to_number()) < tol

        #
        # Only complex-conjugate the elements of the blocks of tensor a, leaving
        # the structure, i.e. signature and total charge, intact.
        #
        c = a.conj_blocks()
        assert c.get_tensor_charge() == a.get_tensor_charge()
        assert c.get_signature() == a.get_signature()

        #
        # Flip the signature of tensor c and its total charge, but do not
        # complex-conjugate the elements of its blocks
        #
        d = c.flip_signature()
        assert d.get_tensor_charge() == b.get_tensor_charge()
        assert d.get_signature() == b.get_signature()

        #
        # conj() is equivalent to flip_signature().conj_blocks() (or in the
        # opposite order). Hence, tensor b and tensor d should be numerically
        # identical
        #
        assert yastn.norm(b - d) < tol
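
Conjugation also ties into dot products: assuming yastn.vdot conjugates its first argument, consistent with the explicit contraction above, vdot(a, a) reproduces the squared Frobenius norm of a.

        assert abs(yastn.vdot(a, a) - a.norm()**2) < tol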