"""Returns True if x is zero."""
"""Wrong matrix shape"""
"""A vector whose components are deferred (e.g. for use with lambdify)
Examples ========
>>> from sympy import DeferredVector, lambdify >>> X = DeferredVector( 'X' ) >>> X X >>> expr = (X[0] + 2, X[2] + 3) >>> func = lambdify( X, expr) >>> func( [1, 2, 3] ) (3, 6) """ i = 0 raise IndexError('DeferredVector index out of range')
return sstr(self)
return "DeferredVector('%s')" % (self.name)
# Added just for numpy compatibility
def _handle_creation_inputs(cls, *args, **kwargs):
    """Return the number of rows, cols and flat matrix elements.

Examples
========

>>> from sympy import Matrix, I

Matrix can be constructed as follows:

* from a nested list of iterables

>>> Matrix(((1, 2 + I), (3, 4)))
Matrix([
[1, 2 + I],
[3,     4]])

* from un-nested iterable (interpreted as a column)

>>> Matrix([1, 2])
Matrix([
[1],
[2]])

* from un-nested iterable with dimensions

>>> Matrix(1, 2, [1, 2])
Matrix([[1, 2]])

* from no arguments (a 0 x 0 matrix)

>>> Matrix()
Matrix(0, 0, [])

* from a rule

>>> Matrix(2, 2, lambda i, j: i/(j + 1))
Matrix([
[0,   0],
[1, 1/2]])
"""
# Matrix(SparseMatrix(...))
# Matrix(Matrix(...))
# Matrix(MatrixSymbol('X', 2, 2))
# Matrix(numpy.ones((2, 2)))
# NumPy array or matrix or some other object that implements
# __array__. So let's first use this method to get a
# numpy.array() and then make a python list out of it.
else:
    raise NotImplementedError(
        "SymPy supports just 1D and 2D matrices")
# Matrix([1, 2, 3]) or Matrix([[1, 2], [3, 4]])
and not isinstance(args[0], DeferredVector):
else:
sorted(list(ncol)))
# Matrix(2, 2, lambda i, j: i+j)
[cls._sympify(op(cls._sympify(i), cls._sympify(j)))
 for j in range(cols)])
# Matrix(2, 2, [1, 2, 3, 4])
# Matrix() # Empty Matrix
"""Helper to set value at location given by key.
Examples
========

>>> from sympy import Matrix, I, zeros, ones
>>> m = Matrix(((1, 2+I), (3, 4)))
>>> m
Matrix([
[1, 2 + I],
[3,     4]])
>>> m[1, 0] = 9
>>> m
Matrix([
[1, 2 + I],
[9,     4]])
>>> m[1, 0] = [[0, 1]]

To replace row r you assign to position r*m where m is the number of
columns:

>>> M = zeros(4)
>>> m = M.cols
>>> M[3*m] = ones(1, m)*2; M
Matrix([
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[2, 2, 2, 2]])

And to replace column c you can assign to position c:

>>> M[2] = ones(m, 1)*4; M
Matrix([
[0, 0, 4, 0],
[0, 0, 4, 0],
[0, 0, 4, 0],
[2, 2, 4, 2]])
"""
raise ValueError('unexpected value: %s' % value)
else:
not isinstance(value, Basic) and is_sequence(value)):
key = (slice(*divmod(i, self.cols)),
       slice(*divmod(j, self.cols)))
else:
    slice(j, j + value.cols))
else:
r""" Returns the inverse of the matrix `K` (mod `m`), if it exists.
Method to find the matrix inverse of `K` (mod `m`) implemented in this function:
* Compute `\mathrm{adj}(K) = \mathrm{cof}(K)^t`, the adjoint matrix of `K`.
* Compute `r = 1/\mathrm{det}(K) \pmod m`.
* `K^{-1} = r\cdot \mathrm{adj}(K) \pmod m`.
Examples
========

>>> from sympy import Matrix
>>> A = Matrix(2, 2, [1, 2, 3, 4])
>>> A.inv_mod(5)
Matrix([
[3, 1],
[4, 2]])
>>> A.inv_mod(3)
Matrix([
[1, 1],
[0, 1]])
"""
"""Conjugate transpose or Hermitian conjugation."""
def H(self):
    """Return Hermitian conjugate.

Examples
========

>>> from sympy import Matrix, I
>>> m = Matrix((0, 1 + I, 2, 3))
>>> m
Matrix([
[    0],
[1 + I],
[    2],
[    3]])
>>> m.H
Matrix([[0, 1 - I, 2, 3]])

See Also
========

conjugate: By-element conjugation
D: Dirac conjugation
"""
def D(self):
    """Return Dirac conjugate (if self.rows == 4).

Examples
========

>>> from sympy import Matrix, I, eye
>>> m = Matrix((0, 1 + I, 2, 3))
>>> m.D
Matrix([[0, 1 - I, -2, -3]])
>>> m = (eye(4) + I*eye(4))
>>> m[0, 3] = 2
>>> m.D
Matrix([
[1 - I,     0,      0,      0],
[    0, 1 - I,      0,      0],
[    0,     0, -1 + I,      0],
[    2,     0,      0, -1 + I]])

If the matrix does not have 4 rows an AttributeError will be raised
because this property is only defined for matrices with 4 rows.

>>> Matrix(eye(2)).D
Traceback (most recent call last):
...
AttributeError: Matrix has no attribute D.

See Also
========

conjugate: By-element conjugation
H: Hermitian conjugation
"""
# In Python 3.2, properties can only return an AttributeError
# so we can't raise a ShapeError -- see commit which added the
# first line of this inline comment. Also, there is no need
# for a message since MatrixBase will raise the AttributeError
from .dense import matrix2numpy
return matrix2numpy(self)
"""Return the number of elements of self.
Implemented mainly so bool(Matrix()) == False. """
def shape(self):
    """The shape (dimensions) of the matrix as the 2-tuple (rows, cols).

Examples
========

>>> from sympy.matrices import zeros
>>> M = zeros(2, 3)
>>> M.shape
(2, 3)
>>> M.rows
2
>>> M.cols
3
"""
return (-self) + a
"""Return self*other where other is either a scalar or a matrix of compatible dimensions.
Examples
========

>>> from sympy.matrices import Matrix
>>> A = Matrix([[1, 2, 3], [4, 5, 6]])
>>> 2*A == A*2 == Matrix([[2, 4, 6], [8, 10, 12]])
True
>>> B = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
>>> A*B
Matrix([
[30, 36, 42],
[66, 81, 96]])
>>> B*A
Traceback (most recent call last):
...
ShapeError: Matrices size mismatch.

See Also
========

matrix_multiply_elementwise
"""
reduce(lambda k, l: k + l,
    [a_ik * b_kj for a_ik, b_kj in zip(alst[i], blst[j])]))
else:
    [i*other for i in self._mat])
raise NotImplementedError(
    "Implemented only for diagonalizable matrices")
else:
    raise NotImplementedError(
        "Only integer and rational values are supported")
"""Return self + other, raising ShapeError if shapes don't match.""" raise TypeError('cannot add matrix and %s' % type(other))
"""Returns self*b
See Also ========
dot cross multiply_elementwise """ return self*b
"""Return self + b """
def table(self, printer, rowstart='[', rowend=']', rowsep='\n',
          colsep=', ', align='right'):
    r"""
    String form of Matrix as a table.
``printer`` is the printer to use for on the elements (generally something like StrPrinter())
``rowstart`` is the string used to start each row (by default '[').
``rowend`` is the string used to end each row (by default ']').
``rowsep`` is the string used to separate rows (by default a newline).
``colsep`` is the string used to separate columns (by default ', ').
``align`` defines how the elements are aligned. Must be one of 'left', 'right', or 'center'. You can also use '<', '>', and '^' to mean the same thing, respectively.
This is used by the string printer for Matrix.
Examples
========

>>> from sympy import Matrix
>>> from sympy.printing.str import StrPrinter
>>> M = Matrix([[1, 2], [-33, 4]])
>>> printer = StrPrinter()
>>> M.table(printer)
'[  1, 2]\n[-33, 4]'
>>> print(M.table(printer))
[  1, 2]
[-33, 4]
>>> print(M.table(printer, rowsep=',\n'))
[  1, 2],
[-33, 4]
>>> print('[%s]' % M.table(printer, rowsep=',\n'))
[[  1, 2],
[-33, 4]]
>>> print(M.table(printer, colsep=' '))
[  1 2]
[-33 4]
>>> print(M.table(printer, align='center'))
[ 1 , 2]
[-33, 4]
>>> print(M.table(printer, rowstart='{', rowend='}'))
{  1, 2}
{-33, 4}
"""
# Handle zero dimensions:
return '[]'
# Build table of string representations of the elements
# Track per-column max lengths for pretty alignment
# Patch strings together
{'left': 'ljust', 'right': 'rjust', 'center': 'center',
 '<': 'ljust', '>': 'rjust', '^': 'center'}[align]
from sympy.printing.str import StrPrinter
printer = StrPrinter()
# Handle zero dimensions:
return 'Matrix(%s, %s, [])' % (self.rows, self.cols)
return "Matrix([%s])" % self.table(printer, rowsep=',\n')
return 'Matrix(%s, %s, [])' % (self.rows, self.cols)
return sstr(self)
"""Returns the Cholesky decomposition L of a matrix A such that L * L.T = A
A must be a square, symmetric, positive-definite and non-singular matrix.
Examples
========

>>> from sympy.matrices import Matrix
>>> A = Matrix(((25, 15, -5), (15, 18, 0), (-5, 0, 11)))
>>> A.cholesky()
Matrix([
[ 5, 0, 0],
[ 3, 3, 0],
[-1, 1, 3]])
>>> A.cholesky() * A.cholesky().T
Matrix([
[25, 15, -5],
[15, 18,  0],
[-5,  0, 11]])

See Also
========

LDLdecomposition
LUdecomposition
QRdecomposition
"""
"""Returns the LDL Decomposition (L, D) of matrix A, such that L * D * L.T == A This method eliminates the use of square root. Further this ensures that all the diagonal entries of L are 1. A must be a square, symmetric, positive-definite and non-singular matrix.
Examples ========
>>> from sympy.matrices import Matrix, eye >>> A = Matrix(((25, 15, -5), (15, 18, 0), (-5, 0, 11))) >>> L, D = A.LDLdecomposition() >>> L Matrix([ [ 1, 0, 0], [ 3/5, 1, 0], [-1/5, 1/3, 1]]) >>> D Matrix([ [25, 0, 0], [ 0, 9, 0], [ 0, 0, 9]]) >>> L * D * L.T * A.inv() == eye(A.rows) True
See Also ========
cholesky LUdecomposition QRdecomposition """
"""Solves Ax = B, where A is a lower triangular matrix.
See Also
========

upper_triangular_solve
gauss_jordan_solve
cholesky_solve
diagonal_solve
LDLsolve
LUsolve
QRsolve
pinv_solve
"""
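A minimal usage sketch (the matrix and right-hand side are illustrative, not from the source); forward substitution resolves a lower triangular system row by row:

>>> from sympy import Matrix
>>> A = Matrix([[2, 0], [1, 3]])
>>> B = Matrix([4, 7])
>>> x = A.lower_triangular_solve(B)
>>> A * x == B
True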
"""Solves Ax = B, where A is an upper triangular matrix.
See Also
========

lower_triangular_solve
gauss_jordan_solve
cholesky_solve
diagonal_solve
LDLsolve
LUsolve
QRsolve
pinv_solve
"""
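A hedged usage sketch mirroring the lower triangular case (example data is made up); back substitution starts from the last row:

>>> from sympy import Matrix
>>> A = Matrix([[2, 1], [0, 3]])
>>> B = Matrix([5, 6])
>>> A * A.upper_triangular_solve(B) == B
True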
"""Solves Ax = B using Cholesky decomposition, for a general square non-singular matrix. For a non-square matrix with rows > cols, the least squares solution is returned.
See Also
========

lower_triangular_solve
upper_triangular_solve
gauss_jordan_solve
diagonal_solve
LDLsolve
LUsolve
QRsolve
pinv_solve
"""
L = self._cholesky()
else:
    raise NotImplementedError('Under-determined System. '
                              'Try M.gauss_jordan_solve(rhs)')
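A small usage sketch with an assumed symmetric positive-definite matrix (illustrative data, not from the source):

>>> from sympy import Matrix
>>> A = Matrix([[4, 2], [2, 3]])
>>> B = Matrix([6, 5])
>>> A * A.cholesky_solve(B) == B
True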
"""Solves Ax = B efficiently, where A is a diagonal Matrix, with non-zero diagonal entries.
Examples
========

>>> from sympy.matrices import Matrix, eye
>>> A = eye(2)*2
>>> B = Matrix([[1, 2], [3, 4]])
>>> A.diagonal_solve(B) == B/2
True
See Also
========

lower_triangular_solve
upper_triangular_solve
gauss_jordan_solve
cholesky_solve
LDLsolve
LUsolve
QRsolve
pinv_solve
"""
raise TypeError("Matrix should be diagonal")
"""Solves Ax = B using LDL decomposition, for a general square and non-singular matrix.
For a non-square matrix with rows > cols, the least squares solution is returned.
Examples
========

>>> from sympy.matrices import Matrix, eye
>>> A = eye(2)*2
>>> B = Matrix([[1, 2], [3, 4]])
>>> A.LDLsolve(B) == B/2
True
See Also
========

LDLdecomposition
lower_triangular_solve
upper_triangular_solve
gauss_jordan_solve
cholesky_solve
diagonal_solve
LUsolve
QRsolve
pinv_solve
"""
L, D = self.LDLdecomposition()
else:
    raise NotImplementedError('Under-determined System. '
                              'Try M.gauss_jordan_solve(rhs)')
"""Return the least-square fit to the data.
By default the cholesky_solve routine is used (method='CH'); other methods of matrix inversion can be used. To find out which are available, see the docstring of the .inv() method.
Examples
========

>>> from sympy.matrices import Matrix, ones
>>> A = Matrix([1, 2, 3])
>>> B = Matrix([2, 3, 4])
>>> S = Matrix(A.row_join(B))
>>> S
Matrix([
[1, 2],
[2, 3],
[3, 4]])
If each line of S represents coefficients of Ax + By and x and y are
[2, 3] then S*xy is:
>>> r = S*Matrix([2, 3]); r
Matrix([
[ 8],
[13],
[18]])
But let's add 1 to the middle value and then solve for the least-squares value of xy:
>>> xy = S.solve_least_squares(Matrix([8, 14, 18])); xy
Matrix([
[ 5/3],
[10/3]])
The error is given by S*xy - r:
>>> S*xy - r
Matrix([
[1/3],
[1/3],
[1/3]])
>>> _.norm().n(2)
0.58
If a different xy is used, the norm will be higher:
>>> xy += ones(2, 1)/10
>>> (S*xy - r).norm().n(2)
1.5
""" if method == 'CH': return self.cholesky_solve(rhs) t = self.T return (t*self).inv(method=method)*t*rhs
"""Return solution to self*soln = rhs using given inversion method.
For a list of possible inversion methods, see the .inv() docstring.
"""
if not self.is_square:
    if self.rows < self.cols:
        raise ValueError('Under-determined system. '
                         'Try M.gauss_jordan_solve(rhs)')
    elif self.rows > self.cols:
        raise ValueError('For over-determined system, M, having '
                         'more rows than columns, try M.solve_least_squares(rhs).')
else:
    return self.inv(method=method)*rhs
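A short usage sketch (illustrative data; the solution equals inv(A)*rhs, as the code above shows):

>>> from sympy import Matrix
>>> A = Matrix([[1, 2], [3, 4]])
>>> b = Matrix([5, 6])
>>> A.solve(b) == A.inv() * b
True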
def __mathml__(self):
    mml = ""
    for i in range(self.rows):
        mml += "<matrixrow>"
        for j in range(self.cols):
            mml += self[i, j].__mathml__()
        mml += "</matrixrow>"
    return "<matrix>" + mml + "</matrix>"
"""Return a submatrix by specifying a list of rows and columns. Negative indices can be given. All indices must be in the range -n <= i < n where n is the number of rows or columns.
Examples
========

>>> from sympy import Matrix
>>> m = Matrix(4, 3, range(12))
>>> m
Matrix([
[0,  1,  2],
[3,  4,  5],
[6,  7,  8],
[9, 10, 11]])
>>> m.extract([0, 1, 3], [0, 1])
Matrix([
[0,  1],
[3,  4],
[9, 10]])
Rows or columns can be repeated:
>>> m.extract([0, 0, 1], [-1])
Matrix([
[2],
[2],
[5]])
Every other row can be taken by using range to provide the indices:
>>> m.extract(range(0, m.rows, 2), [-1])
Matrix([
[2],
[8]])
"""
lambda i, j: flat_list[rowsList[i]*cols + colsList[j]])
"""Converts a key with potentially mixed types of keys (integer and slice) into a tuple of ranges and raises an error if any index is out of self's range.
See Also
========

key2ij
"""
rlo = rhi = 0 else: else: clo = chi = 0 else: else:
"""Converts key into canonical form, converting integers or indexable items into valid integers for self's range or returning slices unchanged.
See Also
========

key2bounds
"""
raise TypeError('key must be a sequence of length 2')
for i, n in zip(key, self.shape)]
else:
"""Apply evalf() to each element of self."""
"""Returns the atoms that form the current object.
Examples
========

>>> from sympy.abc import x, y
>>> from sympy.matrices import Matrix
>>> Matrix([[x]])
Matrix([[x]])
>>> _.atoms()
set([x])
"""
[t if isinstance(t, type) else type(t) for t in types]) else:
def free_symbols(self):
    """Returns the free symbols within the matrix.

Examples
========

>>> from sympy.abc import x
>>> from sympy.matrices import Matrix
>>> Matrix([[x], [1]]).free_symbols
set([x])
"""
"""Return a new matrix with subs applied to each entry.
Examples
========

>>> from sympy.abc import x, y
>>> from sympy.matrices import SparseMatrix, Matrix
>>> SparseMatrix(1, 1, [x])
Matrix([[x]])
>>> _.subs(x, y)
Matrix([[y]])
>>> Matrix(_).subs(y, x)
Matrix([[x]])
"""
def expand(self, deep=True, modulus=None, power_base=True, power_exp=True,
           mul=True, log=True, multinomial=True, basic=True, **hints):
    """Apply core.function.expand to each entry of the matrix.
Examples
========

>>> from sympy.abc import x
>>> from sympy.matrices import Matrix
>>> Matrix(1, 1, [x*(x+1)])
Matrix([[x*(x + 1)]])
>>> _.expand()
Matrix([[x**2 + x]])
"""
deep, modulus, power_base, power_exp, mul, log,
multinomial, basic, **hints))
"""Apply simplify to each element of the matrix.
Examples
========

>>> from sympy.abc import x, y
>>> from sympy import sin, cos
>>> from sympy.matrices import SparseMatrix
>>> SparseMatrix(1, 1, [x*sin(y)**2 + x*cos(y)**2])
Matrix([[x*sin(y)**2 + x*cos(y)**2]])
>>> _.simplify()
Matrix([[x]])
"""
"""Shows location of non-zero entries for fast shape lookup.
Examples
========

>>> from sympy.matrices import Matrix, eye
>>> m = Matrix(2, 3, lambda i, j: i*3+j)
>>> m
Matrix([
[0, 1, 2],
[3, 4, 5]])
>>> m.print_nonzero()
[ XX]
[XXX]
>>> m = eye(4)
>>> m.print_nonzero("x")
[x   ]
[ x  ]
[  x ]
[   x]
"""
else:
"""Solve the linear system Ax = rhs for x where A = self.
This is for symbolic matrices, for real or complex ones use mpmath.lu_solve or mpmath.qr_solve.
See Also
========

lower_triangular_solve
upper_triangular_solve
gauss_jordan_solve
cholesky_solve
diagonal_solve
LDLsolve
QRsolve
pinv_solve
LUdecomposition
"""
# forward substitution, all diag entries are scaled to 1
# backward substitution
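A minimal usage sketch (illustrative data, not from the source):

>>> from sympy import Matrix
>>> A = Matrix([[2, 1], [1, 3]])
>>> b = Matrix([3, 4])
>>> A * A.LUsolve(b) == b
True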
"""Returns the decomposition LU and the row swaps p.
Examples
========

>>> from sympy import Matrix
>>> a = Matrix([[4, 3], [6, 3]])
>>> L, U, _ = a.LUdecomposition()
>>> L
Matrix([
[  1, 0],
[3/2, 1]])
>>> U
Matrix([
[4,    3],
[0, -3/2]])
See Also
========

cholesky
LDLdecomposition
QRdecomposition
LUdecomposition_Simple
LUdecompositionFF
LUsolve
"""
else:
"""Returns A comprised of L, U (L's diag entries are 1) and p which is the list of the row swaps (in order).
See Also
========

LUdecomposition
LUdecompositionFF
LUsolve
"""
# factorization
# find the first non-zero pivot, includes any expression
# this result is based on iszerofunc's analysis of the possible pivots,
# so even though the element may not be strictly zero, the supplied
# iszerofunc's evaluation gave True
"""Compute a fraction-free LU decomposition.
Returns 4 matrices P, L, D, U such that PA = L D**-1 U. If the elements of the matrix belong to some integral domain I, then all elements of L, D and U are guaranteed to belong to I.
**Reference** - W. Zhou & D.J. Jeffrey, "Fraction-free matrix factors: new forms for LU and QR factors". Frontiers in Computer Science in China, Vol 2, no. 1, pp. 67-80, 2008.
See Also
========

LUdecomposition
LUdecomposition_Simple
LUsolve
"""
else: raise ValueError("Matrix is not full rank")
"""Return a matrix containing the cofactor of each element.
See Also
========

cofactor
minorEntry
minorMatrix
adjugate
"""
self.cofactor(i, j, method))
"""Calculate the minor of an element.
See Also
========

minorMatrix
cofactor
cofactorMatrix
"""
    "(%d)" % self.rows + "and 0 <= j < `self.cols` (%d)." % self.cols)
"""Creates the minor matrix of a given element.
See Also
========

minorEntry
cofactor
cofactorMatrix
"""
    "(%d)" % self.rows + "and 0 <= j < `self.cols` (%d)." % self.cols)
"""Calculate the cofactor of an element.
See Also
========

cofactorMatrix
minorEntry
minorMatrix
"""
else:
"""Calculates the Jacobian matrix (derivative of a vectorial function).
Parameters
==========

self : vector of expressions representing functions f_i(x_1, ..., x_n).
X : set of x_i's in order, it can be a list or a Matrix
Both self and X can be a row or a column matrix in any order (i.e., jacobian() should always work).
Examples
========

>>> from sympy import sin, cos, Matrix
>>> from sympy.abc import rho, phi
>>> X = Matrix([rho*cos(phi), rho*sin(phi), rho**2])
>>> Y = Matrix([rho, phi])
>>> X.jacobian(Y)
Matrix([
[cos(phi), -rho*sin(phi)],
[sin(phi),  rho*cos(phi)],
[   2*rho,             0]])
>>> X = Matrix([rho*cos(phi), rho*sin(phi)])
>>> X.jacobian(Y)
Matrix([
[cos(phi), -rho*sin(phi)],
[sin(phi),  rho*cos(phi)]])
See Also
========

hessian
wronskian
"""
# Both X and self can be a row or a column matrix, so we need to make
# sure all valid combinations work, but everything else fails:
else:
else:
# m is the number of functions and n is the number of variables
# computing the Jacobian is now easy:
"""Return Q, R where A = Q*R, Q is orthogonal and R is upper triangular.
Examples
========

This is the example from wikipedia:

>>> from sympy import Matrix
>>> A = Matrix([[12, -51, 4], [6, 167, -68], [-4, 24, -41]])
>>> Q, R = A.QRdecomposition()
>>> Q
Matrix([
[ 6/7, -69/175, -58/175],
[ 3/7, 158/175,   6/175],
[-2/7,    6/35,  -33/35]])
>>> R
Matrix([
[14,  21, -14],
[ 0, 175, -70],
[ 0,   0,  35]])
>>> A == Q*R
True

QR factorization of an identity matrix:

>>> A = Matrix([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
>>> Q, R = A.QRdecomposition()
>>> Q
Matrix([
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
>>> R
Matrix([
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
See Also
========

cholesky
LDLdecomposition
LUdecomposition
QRsolve
"""
    "The number of rows must be greater than columns")
# subtract the projection of mat onto the new vector
# normalize it
raise NotImplementedError(
    "Could not normalize the vector %d." % j)
"""Solve the linear system 'Ax = b'.
'self' is the matrix 'A', the method argument is the vector 'b'. The method returns the solution vector 'x'. If 'b' is a matrix, the system is solved for each column of 'b' and the return value is a matrix of the same shape as 'b'.
This method is slower (approximately by a factor of 2) but more stable
for floating-point arithmetic than the LUsolve method. However, LUsolve
usually uses exact arithmetic, so you do not need to use QRsolve.
This is mainly for educational purposes and symbolic matrices, for real (or complex) matrices use mpmath.qr_solve.
See Also
========

lower_triangular_solve
upper_triangular_solve
gauss_jordan_solve
cholesky_solve
diagonal_solve
LDLsolve
LUsolve
pinv_solve
QRdecomposition
"""
# back substitution to solve R*x = y:
# We build up the result "backwards" in the vector 'x' and reverse it
# only in the end.
"""Return the cross product of `self` and `b` relaxing the condition of compatible dimensions: if each has 3 elements, a matrix of the same type and shape as `self` will be returned. If `b` has the same shape as `self` then common identities for the cross product (like `a x b = - b x a`) will hold.
See Also
========

dot
multiply
multiply_elementwise
"""
    type(b))
else:
(self[1]*b[2] - self[2]*b[1]),
(self[2]*b[0] - self[0]*b[2]),
(self[0]*b[1] - self[1]*b[0])))
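An illustrative check of the anti-commutativity identity mentioned above (made-up vectors):

>>> from sympy import Matrix
>>> a = Matrix([1, 0, 0])
>>> b = Matrix([0, 1, 0])
>>> a.cross(b) == Matrix([0, 0, 1])
True
>>> a.cross(b) == -b.cross(a)
True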
"""Return the dot product of Matrix self and b relaxing the condition of compatible dimensions: if either the number of rows or columns are the same as the length of b then the dot product is returned. If self is a row or column vector, a scalar is returned. Otherwise, a list of results is returned (and in that case the number of columns in self must match the length of b).
Examples
========

>>> from sympy import Matrix
>>> M = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
>>> v = [1, 1, 1]
>>> M.row(0).dot(v)
6
>>> M.col(0).dot(v)
12
>>> M.dot(v)
[6, 15, 24]
See Also
========

cross
multiply
multiply_elementwise
"""
else:
    type(b))
return prod
elif mat.rows == b.rows:
    return mat.T.dot(b)
else:
    raise ShapeError("Dimensions incorrect for dot product.")
"""Return the Hadamard product (elementwise product) of A and B
Examples
========

>>> from sympy.matrices import Matrix
>>> A = Matrix([[0, 1, 2], [3, 4, 5]])
>>> B = Matrix([[1, 10, 100], [100, 10, 1]])
>>> A.multiply_elementwise(B)
Matrix([
[  0, 10, 200],
[300, 40,   5]])
See Also
========

cross
dot
multiply
"""
"""Return non-zero values of self."""
"""Return the Norm of a Matrix or Vector. In the simplest case this is the geometric size of the vector Other norms can be specified by the ord parameter
===== ============================ ==========================
ord   norm for matrices            norm for vectors
===== ============================ ==========================
None  Frobenius norm               2-norm
'fro' Frobenius norm               - does not exist
inf   --                           max(abs(x))
-inf  --                           min(abs(x))
1     --                           as below
-1    --                           as below
2     2-norm (largest sing. value) as below
-2    smallest singular value      as below
other - does not exist             sum(abs(x)**ord)**(1./ord)
===== ============================ ==========================
Examples
========

>>> from sympy import Matrix, Symbol, trigsimp, cos, sin, oo
>>> x = Symbol('x', real=True)
>>> v = Matrix([cos(x), sin(x)])
>>> trigsimp(v.norm())
1
>>> v.norm(10)
(sin(x)**10 + cos(x)**10)**(1/10)
>>> A = Matrix([[1, 1], [1, 1]])
>>> A.norm(2)  # Spectral norm (max of |Ax|/|x| under 2-vector-norm)
2
>>> A.norm(-2)  # Inverse spectral norm (smallest singular value)
0
>>> A.norm()  # Frobenius Norm
2
>>> Matrix([1, -2]).norm(oo)
2
>>> Matrix([-1, 2]).norm(-oo)
1
See Also
========

normalized
"""
# Row or Column Vector Norms
# Otherwise generalize the 2-norm, Sum(x_i**ord)**(1/ord)
# Note that while useful this is not mathematically a norm
except (NotImplementedError, TypeError):
    raise ValueError("Expected order to be Number, Symbol, oo")
# Matrix Norms
else:
# Maximum singular value
# Minimum singular value
['f', 'fro', 'frobenius', 'vector']):
# Reshape as vector and send back to norm function
else:
    raise NotImplementedError("Matrix Norms under development")
"""Return the normalized version of ``self``.
See Also
========

norm
"""
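A small illustrative example (made-up vector); the result has norm 1:

>>> from sympy import Matrix
>>> Matrix([3, 4]).normalized().norm()
1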
"""Return the projection of ``self`` onto the line containing ``v``.
Examples
========

>>> from sympy import Matrix, S, sqrt
>>> V = Matrix([sqrt(3)/2, S.Half])
>>> x = Matrix([[1, 0]])
>>> V.project(x)
Matrix([[sqrt(3)/2, 0]])
>>> V.project(-x)
Matrix([[sqrt(3)/2, 0]])
"""
"""Permute the rows of the matrix with the given permutation in reverse.
Examples
========

>>> from sympy.matrices import eye
>>> M = eye(3)
>>> M.permuteBkwd([[0, 1], [0, 2]])
Matrix([
[0, 1, 0],
[0, 0, 1],
[1, 0, 0]])
See Also
========

permuteFwd
"""
"""Permute the rows of the matrix with the given permutation.
Examples
========

>>> from sympy.matrices import eye
>>> M = eye(3)
>>> M.permuteFwd([[0, 1], [0, 2]])
Matrix([
[0, 0, 1],
[1, 0, 0],
[0, 1, 0]])
See Also
========

permuteBkwd
"""
"""Return the exponentiation of a square matrix.""" "Exponentiation is valid only for square matrices") except MatrixError: raise NotImplementedError("Exponentiation is implemented only for matrices for which the Jordan normal form can be computed")
# This function computes the matrix exponential for one single Jordan block
else:
# extract the diagonal part
# and the nilpotent part
# compute its exponential
# combine the two parts
# n = self.rows
def is_square(self):
    """Checks if a matrix is square.

A matrix is square if the number of rows equals the number of columns.
The empty matrix is square by definition, since the number of rows and
the number of columns are both zero.

Examples
========

>>> from sympy import Matrix
>>> a = Matrix([[1, 2, 3], [4, 5, 6]])
>>> b = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
>>> c = Matrix([])
>>> a.is_square
False
>>> b.is_square
True
>>> c.is_square
True
"""
def is_zero(self):
    """Checks if a matrix is a zero matrix.

A matrix is zero if every element is zero. A matrix need not be square
to be considered zero. The empty matrix is zero by the principle of
vacuous truth. For a matrix that may or may not be zero (e.g. contains
a symbol), this will be None.

Examples
========

>>> from sympy import Matrix, zeros
>>> from sympy.abc import x
>>> a = Matrix([[0, 0], [0, 0]])
>>> b = zeros(3, 4)
>>> c = Matrix([[0, 1], [0, 0]])
>>> d = Matrix([])
>>> e = Matrix([[x, 0], [0, 0]])
>>> a.is_zero
True
>>> b.is_zero
True
>>> c.is_zero
False
>>> d.is_zero
True
>>> e.is_zero
"""
"""Checks if a matrix is nilpotent.
A matrix B is nilpotent if for some integer k, B**k is a zero matrix.
Examples
========

>>> from sympy import Matrix
>>> a = Matrix([[0, 0, 0], [1, 0, 0], [1, 1, 0]])
>>> a.is_nilpotent()
True

>>> a = Matrix([[1, 0, 1], [1, 0, 0], [1, 1, 0]])
>>> a.is_nilpotent()
False
"""
    "Nilpotency is valid only for square matrices")
def is_upper(self):
    """Check if matrix is an upper triangular matrix. True can be returned
    even if the matrix is not square.

Examples
========

>>> from sympy import Matrix
>>> m = Matrix(2, 2, [1, 0, 0, 1])
>>> m
Matrix([
[1, 0],
[0, 1]])
>>> m.is_upper
True

>>> m = Matrix(4, 3, [5, 1, 9, 0, 4, 6, 0, 0, 5, 0, 0, 0])
>>> m
Matrix([
[5, 1, 9],
[0, 4, 6],
[0, 0, 5],
[0, 0, 0]])
>>> m.is_upper
True

>>> m = Matrix(2, 3, [4, 2, 5, 6, 1, 1])
>>> m
Matrix([
[4, 2, 5],
[6, 1, 1]])
>>> m.is_upper
False

See Also
========

is_lower
is_diagonal
is_upper_hessenberg
"""
for i in range(1, self.rows) for j in range(i))
def is_lower(self):
    """Check if matrix is a lower triangular matrix. True can be returned
    even if the matrix is not square.

Examples
========

>>> from sympy import Matrix
>>> m = Matrix(2, 2, [1, 0, 0, 1])
>>> m
Matrix([
[1, 0],
[0, 1]])
>>> m.is_lower
True

>>> m = Matrix(4, 3, [0, 0, 0, 2, 0, 0, 1, 4, 0, 6, 6, 5])
>>> m
Matrix([
[0, 0, 0],
[2, 0, 0],
[1, 4, 0],
[6, 6, 5]])
>>> m.is_lower
True

>>> from sympy.abc import x, y
>>> m = Matrix(2, 2, [x**2 + y, y**2 + x, 0, x + y])
>>> m
Matrix([
[x**2 + y, x + y**2],
[       0,    x + y]])
>>> m.is_lower
False

See Also
========

is_upper
is_diagonal
is_lower_hessenberg
"""
for i in range(self.rows) for j in range(i + 1, self.cols))
def is_hermitian(self):
    """Checks if the matrix is Hermitian.

In a Hermitian matrix element i,j is the complex conjugate of
element j,i.

Examples
========

>>> from sympy.matrices import Matrix
>>> from sympy import I
>>> from sympy.abc import x
>>> a = Matrix([[1, I], [-I, 1]])
>>> a
Matrix([
[ 1, I],
[-I, 1]])
>>> a.is_hermitian
True
>>> a[0, 0] = 2*I
>>> a.is_hermitian
False
>>> a[0, 0] = x
>>> a.is_hermitian
>>> a[0, 1] = a[1, 0]*I
>>> a.is_hermitian
False
"""
self[i, i].is_real for i in range(self.rows))
(self[i, j] - self[j, i].conjugate()).is_zero
    for i in range(self.rows)
    for j in range(i + 1, self.cols))
def is_upper_hessenberg(self):
    """Checks if the matrix is the upper-Hessenberg form.

The upper Hessenberg matrix has zero entries below the first subdiagonal.

Examples
========

>>> from sympy.matrices import Matrix
>>> a = Matrix([[1, 4, 2, 3], [3, 4, 1, 7], [0, 2, 3, 4], [0, 0, 1, 3]])
>>> a
Matrix([
[1, 4, 2, 3],
[3, 4, 1, 7],
[0, 2, 3, 4],
[0, 0, 1, 3]])
>>> a.is_upper_hessenberg
True

See Also
========

is_lower_hessenberg
is_upper
"""
for i in range(2, self.rows) for j in range(i - 1))
def is_lower_hessenberg(self):
    r"""Checks if the matrix is in the lower-Hessenberg form.

The lower Hessenberg matrix has zero entries above the first superdiagonal.

Examples
========

>>> from sympy.matrices import Matrix
>>> a = Matrix([[1, 2, 0, 0], [5, 2, 3, 0], [3, 4, 3, 7], [5, 6, 1, 1]])
>>> a
Matrix([
[1, 2, 0, 0],
[5, 2, 3, 0],
[3, 4, 3, 7],
[5, 6, 1, 1]])
>>> a.is_lower_hessenberg
True

See Also
========

is_upper_hessenberg
is_lower
"""
for i in range(self.rows) for j in range(i + 2, self.cols))
"""Checks if any elements contain Symbols.
Examples
========

>>> from sympy.matrices import Matrix
>>> from sympy.abc import x, y
>>> M = Matrix([[x, y], [1, 0]])
>>> M.is_symbolic()
True
"""
"""Check if matrix is symmetric matrix, that is square matrix and is equal to its transpose.
By default, simplifications occur before testing symmetry. They can be skipped using 'simplify=False'; while speeding things a bit, this may however induce false negatives.
Examples
========

>>> from sympy import Matrix
>>> m = Matrix(2, 2, [0, 1, 1, 2])
>>> m
Matrix([
[0, 1],
[1, 2]])
>>> m.is_symmetric()
True

>>> m = Matrix(2, 2, [0, 1, 2, 0])
>>> m
Matrix([
[0, 1],
[2, 0]])
>>> m.is_symmetric()
False

>>> m = Matrix(2, 3, [0, 0, 0, 0, 0, 0])
>>> m
Matrix([
[0, 0, 0],
[0, 0, 0]])
>>> m.is_symmetric()
False

>>> from sympy.abc import x, y
>>> m = Matrix(3, 3, [1, x**2 + 2*x + 1, y, (x + 1)**2, 2, 0, y, 0, 3])
>>> m
Matrix([
[         1, x**2 + 2*x + 1, y],
[(x + 1)**2,              2, 0],
[         y,              0, 3]])
>>> m.is_symmetric()
True

If the matrix is already simplified, you may speed-up the
is_symmetric() test by using 'simplify=False'.

>>> m.is_symmetric(simplify=False)
False
>>> m1 = m.expand()
>>> m1.is_symmetric(simplify=False)
True
"""
else:
"""Check if matrix M is an antisymmetric matrix, that is, M is a square matrix with all M[i, j] == -M[j, i].
When ``simplify=True`` (default), the sum M[i, j] + M[j, i] is simplified before testing to see if it is zero. By default, the SymPy simplify function is used. To use a custom function set simplify to a function that accepts a single argument which returns a simplified expression. To skip simplification, set simplify to False but note that although this will be faster, it may induce false negatives.
Examples
========

>>> from sympy import Matrix, symbols
>>> m = Matrix(2, 2, [0, 1, -1, 0])
>>> m
Matrix([
[ 0, 1],
[-1, 0]])
>>> m.is_anti_symmetric()
True
>>> x, y = symbols('x y')
>>> m = Matrix(2, 3, [0, 0, x, -y, 0, 0])
>>> m
Matrix([
[ 0, 0, x],
[-y, 0, 0]])
>>> m.is_anti_symmetric()
False

>>> from sympy.abc import x, y
>>> m = Matrix(3, 3, [0, x**2 + 2*x + 1, y,
...                   -(x + 1)**2, 0, x*y,
...                   -y, -x*y, 0])
Simplification of matrix elements is done by default so even though two elements which should be equal and opposite wouldn't pass an equality test, the matrix is still reported as anti-symmetric:
>>> m[0, 1] == -m[1, 0]
False
>>> m.is_anti_symmetric()
True
If 'simplify=False' is used for the case when a Matrix is already simplified, this will speed things up. Here, we see that without simplification the matrix does not appear anti-symmetric:
>>> m.is_anti_symmetric(simplify=False)
False
But if the matrix were already expanded, then it would appear anti-symmetric and simplification in the is_anti_symmetric routine is not needed:
>>> m = m.expand()
>>> m.is_anti_symmetric(simplify=False)
True
"""
# accept custom simplification
_simplify if simplify else False
# diagonal # others else:
"""Check if matrix is diagonal, that is matrix in which the entries outside the main diagonal are all zero.
Examples
========

>>> from sympy import Matrix, diag
>>> m = Matrix(2, 2, [1, 0, 0, 2])
>>> m
Matrix([
[1, 0],
[0, 2]])
>>> m.is_diagonal()
True

>>> m = Matrix(2, 2, [1, 1, 0, 2])
>>> m
Matrix([
[1, 1],
[0, 2]])
>>> m.is_diagonal()
False

>>> m = diag(1, 2, 3)
>>> m
Matrix([
[1, 0, 0],
[0, 2, 0],
[0, 0, 3]])
>>> m.is_diagonal()
True
See Also
========

is_lower
is_upper
is_diagonalizable
diagonalize
"""
"""Computes the matrix determinant using the method "method".
Possible values for "method": bareis ... det_bareis berkowitz ... berkowitz_det det_LU ... det_LU_decomposition
See Also
========

det_bareis
berkowitz_det
det_LU
"""
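An illustrative sketch (made-up matrix); all the methods listed above should agree on the value:

>>> from sympy import Matrix
>>> M = Matrix([[1, 2], [3, 4]])
>>> M.det()
-2
>>> M.det(method='berkowitz') == M.det(method='bareis')
True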
# if methods were made internal and all determinant calculations
# passed through here, then these lines could be factored out of
# the method routines
else:
"""Compute matrix determinant using Bareis' fraction-free algorithm which is an extension of the well known Gaussian elimination method. This approach is best suited for dense symbolic matrices and will result in a determinant with minimal number of fractions. It means that less term rewriting is needed on resulting formulae.
TODO: Implement algorithm for sparse matrices (SFF), http://www.eecis.udel.edu/~saunders/papers/sffge/it5.ps.
See Also ========
det berkowitz_det """
(M[0, 2]*M[1, 1]*M[2, 0] + M[0, 0]*M[1, 2]*M[2, 1] +
 M[0, 1]*M[1, 0]*M[2, 2])
else:
# look for a pivot in the current column
# and assume det == 0 if none is found
else:
# proceed with Bareis' fraction-free (FF)
# form of Gaussian elimination algorithm
else:
"""Compute matrix determinant using LU decomposition
Note that this method fails if the LU decomposition itself fails. In particular, if the matrix has no inverse this method will fail.
TODO: Implement algorithm for sparse matrices (SFF), http://www.eecis.udel.edu/~saunders/papers/sffge/it5.ps.
See Also
========

det
det_bareis
berkowitz_det
"""
raise NonSquareMatrixError()
"""Returns the adjugate matrix.
Adjugate matrix is the transpose of the cofactor matrix.
http://en.wikipedia.org/wiki/Adjugate
See Also
========

cofactorMatrix
transpose
berkowitz
"""
"""Calculates the inverse using LU decomposition.
See Also
========

inv
inverse_GE
inverse_ADJ
"""
"""Calculates the inverse using Gaussian elimination.
See Also
========

inv
inverse_LU
inverse_ADJ
"""
"""Calculates the inverse using the adjugate matrix and a determinant.
See Also
========

inv
inverse_LU
inverse_GE
"""
# if equals() can't decide, will rref be able to?
ok = self.rref(simplify=True)[0]
zero = any(iszerofunc(ok[j, j]) for j in range(ok.rows))
"""Return reduced row-echelon form of matrix and indices of pivot vars.
To simplify elements before finding nonzero pivots set simplify=True (to use the default SymPy simplify function) or pass a custom simplify function.
Examples
========

>>> from sympy import Matrix
>>> from sympy.abc import x
>>> m = Matrix([[1, 2], [x, 1 - 1/x]])
>>> m.rref()
(Matrix([
[1, 0],
[0, 1]]), [0, 1])
"""
simplify, FunctionType) else _simplify
# pivot: index of next row to contain a pivot
# pivotlist: indices of pivot variables (non-free)
else:
""" Returns the rank of a matrix
>>> from sympy import Matrix >>> from sympy.abc import x >>> m = Matrix([[1, 2], [x, 1 - 1/x]]) >>> m.rank() 2 >>> n = Matrix(3, 3, range(1, 10)) >>> n.rank() 2 """
"""Returns list of vectors (Matrix objects) that span nullspace of self
Examples
========

>>> from sympy.matrices import Matrix
>>> m = Matrix(3, 3, [1, 3, 0, -2, -6, 0, 3, 9, 6])
>>> m
Matrix([
[ 1,  3, 0],
[-2, -6, 0],
[ 3,  9, 6]])
>>> m.nullspace()
[Matrix([
[-3],
[ 1],
[ 0]])]
See Also
========

columnspace
"""
simplify, FunctionType) else _simplify
# create a set of vectors for the basis
# contains the variable index to which the vector corresponds
else:
# add negative of nonpivot entry to corr vector
v = simpfunc(v)
# XXX: Is this the correct error?
raise NotImplementedError(
    "Could not compute the nullspace of `self`.")
"""Returns list of vectors (Matrix objects) that span columnspace of self
Examples
========

>>> from sympy.matrices import Matrix
>>> m = Matrix(3, 3, [1, 3, 0, -2, -6, 0, 3, 9, 6])
>>> m
Matrix([
[ 1,  3, 0],
[-2, -6, 0],
[ 3,  9, 6]])
>>> m.columnspace()
[Matrix([
[ 1],
[-2],
[ 3]]), Matrix([
[0],
[0],
[6]])]
See Also
========

nullspace
"""
simplify, FunctionType) else _simplify
# create a set of vectors for the basis
"""The Berkowitz algorithm.
Given N x N matrix with symbolic content, compute efficiently coefficients of characteristic polynomials of 'self' and all its square sub-matrices composed by removing both i-th row and column, without division in the ground domain.
This method is particularly useful for computing determinant, principal minors and characteristic polynomial, when 'self' has complicated coefficients e.g. polynomials. Semi-direct usage of this algorithm is also important in computing efficiently sub-resultant PRS.
Assuming that M is a square matrix of dimension N x N and I is the
N x N identity matrix, then the following definition of the
characteristic polynomial is being used:
charpoly(M) = det(t*I - M)
As a consequence, all polynomials generated by Berkowitz algorithm are monic.
>>> from sympy import Matrix
>>> from sympy.abc import x, y, z

>>> M = Matrix([[x, y, z], [1, 0, 0], [y, z, x]])

>>> p, q, r = M.berkowitz()

>>> p # 1 x 1 M's sub-matrix
(1, -x)

>>> q # 2 x 2 M's sub-matrix
(1, -x, -y)

>>> r # 3 x 3 M's sub-matrix
(1, -2*x, x**2 - y*z - y, x*y - z**2)
For more information on the implemented algorithm refer to:
[1] S.J. Berkowitz, On computing the determinant in small parallel time using a small number of processors, ACM, Information Processing Letters 18, 1984, pp. 147-150
[2] M. Keber, Division-Free computation of sub-resultants using Bezout matrices, Tech. Report MPI-I-2006-1-006, Saarbrucken, 2006
See Also
========

berkowitz_det
berkowitz_minors
berkowitz_charpoly
berkowitz_eigenvals
"""
"""Computes determinant using Berkowitz method.
See Also
========

det
berkowitz
"""
"""Computes principal minors using Berkowitz method.
See Also
========

berkowitz
"""
"""Computes characteristic polynomial minors using Berkowitz method.
A PurePoly is returned so using different variables for ``x`` does not affect the comparison or the polynomials:
Examples
========

>>> from sympy import Matrix
>>> from sympy.abc import x, y
>>> A = Matrix([[1, 3], [2, 0]])
>>> A.berkowitz_charpoly(x) == A.berkowitz_charpoly(y)
True
Specifying ``x`` is optional; a Dummy with name ``lambda`` is used by default (which looks good when pretty-printed in unicode):
>>> A.berkowitz_charpoly().as_expr()
_lambda**2 - _lambda - 6
No test is done to see that ``x`` doesn't clash with an existing symbol, so using the default (``lambda``) or your own Dummy symbol is the safest option:
>>> A = Matrix([[1, 2], [x, 0]])
>>> A.charpoly().as_expr()
_lambda**2 - _lambda - 2*x
>>> A.charpoly(x).as_expr()
x**2 - 3*x
See Also
========

berkowitz
"""
"""Computes eigenvalues of a Matrix using Berkowitz method.
See Also
========

berkowitz
"""
"""Return eigen values using the berkowitz_eigenvals routine.
Since the roots routine doesn't always work well with Floats, they will
be replaced with Rationals before calling that routine. If this is not
desired, set flag ``rational`` to False.
"""
# roots doesn't like Floats, so replace them with Rationals
# unless the nsimplify flag indicates that this has already
# been done, e.g. in eigenvects
[nsimplify(v, rational=True) for v in mat])
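A minimal illustrative example (made-up matrix); the result maps each eigenvalue to its multiplicity:

>>> from sympy import Matrix
>>> Matrix([[0, 1], [1, 0]]).eigenvals() == {1: 1, -1: 1}
True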
"""Return list of triples (eigenval, multiplicity, basis).
The flag ``simplify`` has two effects:
1) if bool(simplify) is True, as_content_primitive() will be used to
   tidy up normalization artifacts;
2) if nullspace needs simplification to compute the basis, the simplify
   flag will be passed on to the nullspace routine which will interpret
   it there.
If the matrix contains any Floats, they will be changed to Rationals
for computation purposes, but the answers will be returned after being
evaluated with evalf. If it is desired to remove small imaginary
portions during the evalf step, pass a value for the ``chop`` flag.
"""
# roots doesn't like Floats, so replace them with Rationals
float = True
mat = mat._new(mat.rows, mat.cols,
               [nsimplify(v, rational=True) for v in mat])
flags['rational'] = False  # to tell eigenvals not to do this
# whether tmp.is_symbolic() is True or False, it is possible that
# the basis will come back as [] in which case simplification is
# necessary.
# The nullspace routine failed, try it again with simplification
basis = tmp.nullspace(simplify=simplify)
if not basis:
    raise NotImplementedError(
        "Can't evaluate eigenvector for eigenvalue %s" % r)
# the relationship A*e = lambda*e will still hold if we change the
# eigenvector; so if simplify is True we tidy up any normalization
# artifacts with as_content_primitive (default) and remove any pure
# Integer denominators.
out.append((r.evalf(chop=chop), k, [
    mat._new(b).evalf(chop=chop) for b in basis]))
else:
"""Compute the singular values of a Matrix
Examples
========

>>> from sympy import Matrix, Symbol
>>> x = Symbol('x', real=True)
>>> A = Matrix([[0, 1, 0], [0, x, 0], [-1, 0, 0]])
>>> A.singular_values()
[sqrt(x**2 + 1), 1, 0]
See Also
========

condition_number
"""
# Compute eigenvalues of A.H A
# Expands result from eigenvals into a simple list
# sort them in descending order
"""Returns the condition number of a matrix.
This is the maximum singular value divided by the minimum singular value
Examples
========

>>> from sympy import Matrix, S
>>> A = Matrix([[1, 0, 0], [0, 10, 0], [0, 0, S.One/10]])
>>> A.condition_number()
100
See Also
========

singular_values
"""
def doit(*args):
    item_doit = lambda item: getattr(item, attr)(*args)
    return self.applyfunc(item_doit)
return doit
else:
    "%s has no attribute %s." % (self.__class__.__name__, attr))
"""Integrate each element of the matrix.
Examples
========

>>> from sympy.matrices import Matrix
>>> from sympy.abc import x, y
>>> M = Matrix([[x, y], [1, 0]])
>>> M.integrate((x, ))
Matrix([
[x**2/2, x*y],
[     x,   0]])
>>> M.integrate((x, 0, 2))
Matrix([
[2, 2*y],
[2,   0]])

See Also
========

limit
diff
"""
lambda i, j: self[i, j].integrate(*args))
"""Calculate the limit of each element in the matrix.
Examples
========

>>> from sympy.matrices import Matrix
>>> from sympy.abc import x, y
>>> M = Matrix([[x, y], [1, 0]])
>>> M.limit(x, 2)
Matrix([
[2, y],
[1, 0]])

See Also
========

integrate
diff
"""
lambda i, j: self[i, j].limit(*args))
"""Calculate the derivative of each element in the matrix.
Examples
========

>>> from sympy.matrices import Matrix
>>> from sympy.abc import x, y
>>> M = Matrix([[x, y], [1, 0]])
>>> M.diff(x)
Matrix([
[1, 0],
[0, 0]])

See Also
========

integrate
limit
"""
lambda i, j: self[i, j].diff(*args))
"""Return the Matrix converted into a one column matrix by stacking columns
Examples
========

>>> from sympy import Matrix
>>> m = Matrix([[1, 3], [2, 4]])
>>> m
Matrix([
[1, 3],
[2, 4]])
>>> m.vec()
Matrix([
[1],
[2],
[3],
[4]])
See Also
========

vech
"""
"""Return the unique elements of a symmetric Matrix as a one column matrix by stacking the elements in the lower triangle.
Arguments:
diagonal -- include the diagonal cells of self or not
check_symmetry -- checks symmetry of self but not completely reliably
Examples
========

>>> from sympy import Matrix
>>> m = Matrix([[1, 2], [2, 3]])
>>> m
Matrix([
[1, 2],
[2, 3]])
>>> m.vech()
Matrix([
[1],
[2],
[3]])
>>> m.vech(diagonal=False)
Matrix([[2]])
See Also
========

vec
"""
else:
"""Obtains the square sub-matrices on the main diagonal of a square matrix.
Useful for inverting symbolic matrices or solving systems of linear equations which may be decoupled by having a block diagonal structure.
Examples
========

>>> from sympy import Matrix
>>> from sympy.abc import x, y, z
>>> A = Matrix([[1, 3, 0, 0], [y, z*z, 0, 0], [0, 0, x, 0], [0, 0, 0, 0]])
>>> a1, a2, a3 = A.get_diag_blocks()
>>> a1
Matrix([
[1,    3],
[y, z**2]])
>>> a2
Matrix([[x]])
>>> a3
Matrix([[0]])
"""
else: else: else:
""" Return (P, D), where D is diagonal and
D = P^-1 * M * P
where M is current matrix.
Examples
========

>>> from sympy import Matrix
>>> m = Matrix(3, 3, [1, 2, 0, 0, 3, 0, 2, -4, 2])
>>> m
Matrix([
[1,  2, 0],
[0,  3, 0],
[2, -4, 2]])
>>> (P, D) = m.diagonalize()
>>> D
Matrix([
[1, 0, 0],
[0, 2, 0],
[0, 0, 3]])
>>> P
Matrix([
[-1, 0, -1],
[ 0, 0, -1],
[ 2, 1,  2]])
>>> P.inv() * m * P
Matrix([
[1, 0, 0],
[0, 2, 0],
[0, 0, 3]])
See Also
========

is_diagonal
is_diagonalizable
"""
else:
    self._eigenvects = self.eigenvects(simplify=True)
"""Check if matrix is diagonalizable.
If reals_only==True, then check that the diagonalized matrix consists
only of real (not complex) values.
Some subproducts could be used further in other methods to avoid
double calculations. By default (if clear_subproducts==True) they will
be deleted.
Examples
========

>>> from sympy import Matrix
>>> m = Matrix(3, 3, [1, 2, 0, 0, 3, 0, 2, -4, 2])
>>> m
Matrix([
[1,  2, 0],
[0,  3, 0],
[2, -4, 2]])
>>> m.is_diagonalizable()
True
>>> m = Matrix(2, 2, [0, 1, 0, 0])
>>> m
Matrix([
[0, 1],
[0, 0]])
>>> m.is_diagonalizable()
False
>>> m = Matrix(2, 2, [0, 1, -1, 0])
>>> m
Matrix([
[ 0, 1],
[-1, 0]])
>>> m.is_diagonalizable()
True
>>> m.is_diagonalizable(True)
False
See Also
========

is_diagonal
diagonalize
"""
# To every eigenvalue may belong `i` blocks with size s(i)
# and a chain of generalized eigenvectors
# which will be determined by the following computations:
# for every eigenvalue we will add a dictionary
# containing, for all blocks, the blocksizes and the attached chain
# vectors that will eventually be used to form the transformation P
raise AttributeError("could not compute the eigenvalues")
# The Jordan chains have all length 1 and consist of only one vector
# which is the eigenvector of course
raise MatrixError("Matrix has the eigen vector with geometrical "
                  "multiplicity equal zero.")
else:
# Up to now we know nothing about the sizes of the blocks of our
# Jordan matrix.
# Note that knowledge of algebraic and geometrical multiplicity
# will *NOT* be sufficient to determine this structure.
# The blocksize `s` could be defined as the minimal `k` where
# `kernel(self-lI)^k = kernel(self-lI)^(k+1)`
# The extreme case would be that k = (multiplicity-geometrical+1)
# but the blocks could be smaller.
# Consider for instance the following matrix
# [2 1 0 0] # [0 2 1 0] # [0 0 2 0] # [0 0 0 2]
# which coincides with it own Jordan canonical form. # It has only one eigenvalue l=2 of (algebraic) multiplicity=4. # It has two eigenvectors, one belonging to the last row (blocksize 1) # and one being the last part of a jordan chain of length 3 (blocksize of the first block).
# Note again that it is not possible to obtain this from the algebraic
# and geometrical multiplicity alone. This only gives us an upper limit
# for the dimension of one of the subspaces (blocksize of the
# corresponding Jordan block) given by max=(multiplicity-geometrical+1)
# which is reached for our matrix but not for
# [2 1 0 0] # [0 2 0 0] # [0 0 2 1] # [0 0 0 2]
# although multiplicity=4 and geometrical=2 are the same for this matrix.
# We will store the matrices `(self-l*I)^k` for further computations
# for convenience only we store `Ms[0]=(self-lI)^0=I`
# so the index is the same as the power for all further Ms entries
# We also store the vectors that span these kernels (Ns[0] = [])
# and also their dimensions `a_s`
# this is mainly done for debugging since the number of blocks of a given
# size can be computed from the a_s, in order to check our result, which
# is obtained more simply by counting the number of Jordan chains for `a`
# given `s`
# `a_0` is `dim(Kernel(Ms[0]) = dim(Kernel(I)) = 0` since `I` is regular
# We now have `Ms[-1]=((self-l*I)**s)=Z=0`.
# We also know the size of the biggest Jordan block
# associated with `l` to be `s`.
# Now let us proceed with the computation of the associate part of the
# transformation matrix `P`.
# We already know the kernel (=nullspace) `K_l` of (self-lI) which
# consists of the eigenvectors belonging to eigenvalue `l`.
# The dimension of this space is the geometric multiplicity of
# eigenvalue `l`.
# For every eigenvector ev out of `K_l`, there exists a subspace that is
# spanned by the Jordan chain of ev. The dimension of this subspace is
# represented by the length `s` of the Jordan block.
# The chain itself is given by `{e_0,..,e_s-1}` where:
# `e_k+1 = (self-lI)e_k (*)`
# and
# `e_s-1 = ev`
# So it would be possible to start with the already known `ev` and work
# backwards until one reaches `e_0`. Unfortunately this cannot be done
# by simply solving system (*) since its matrix is singular (by
# definition of the eigenspaces).
# This approach would force us to choose, in every step, the degree of
# freedom undetermined by (*). This is difficult to implement with
# computer algebra systems and also quite inefficient.
# We therefore reformulate the problem in terms of nullspaces.
# To do so we start from the other end and choose `e0`'s out of
# `E = Kernel(self-lI)^s / Kernel(self-lI)^(s-1)`
# Note that `Kernel(self-lI)^s = Kernel(Z) = V` (the whole vector space).
# So in the first step `s=smax` this restriction turns out to actually
# restrict nothing at all and the only remaining condition is to choose
# vectors in `Kernel(self-lI)^(s-1)`.
# Subsequently we compute `e_1=(self-lI)e_0`, `e_2=(self-lI)*e_1` and
# so on.
# The subspace `E` can have a dimension larger than one.
# That means that we have more than one Jordan block of size `s` for the
# eigenvalue `l` and as many Jordan chains (this is the case in the
# second example). In this case we start as many Jordan chains and have
# as many blocks of size `s` in the jcf.
# We now have all the Jordan blocks of size `s` but there might be
# others attached to the same eigenvalue that are smaller.
# So we will do the same procedure also for `s-1` and so on until 1 (the
# lowest possible order where the Jordan chain is of length 1 and just
# represented by the eigenvector).
# We want the vectors in `Kernel((self-lI)^s)` (**),
# but without those in `Kernel(self-lI)^s-1` so we will add these as
# additional equations to the system formed by `S` (`S` will no longer
# be quadratic but this does no harm since `S` is rank deficient).
# We also want to exclude the vectors in the chains for the bigger
# blocks that we have already computed (if there are any).
# (That is why we start with the biggest s).
######## Implementation remark: ########
# Doing so for *ALL* already computed chain vectors
# we actually exclude some vectors twice because they are already
# excluded by the condition (**).
# This happens if there is more than one block attached to the same
# eigenvalue *AND* the current blocksize is smaller than the block whose
# chain vectors we exclude.
# If the current block has size `s_i` and the next bigger block has size
# `s_i-1` then the first `s_i-s_i-1` chain vectors of the bigger block
# are already excluded by (**).
# The unnecessary adding of these equations could be avoided if the
# algorithm took into account the lengths of the already computed
# chains, which are already stored, and added only the last `s` items.
# However the following loop would be a good deal more nested to do so.
# Since adding a linearly dependent equation does not change the result,
# it can harm only in terms of efficiency.
# So to be sure I left it there for the moment.
# Determine the number of chain leaders which equals the number of
# blocks with that size.
# s_cells=[]
# We want the chain leader to appear as the last of the block.
r"""Return Jordan form J of current matrix.
Also the transformation P such that
`J = P^{-1} \cdot M \cdot P`
and the jordan blocks forming J will be calculated.
Examples
========

>>> from sympy import Matrix
>>> m = Matrix([
...     [ 6,  5, -2, -3],
...     [-3, -1,  3,  3],
...     [ 2,  1, -2, -3],
...     [-1,  1,  5,  5]])
>>> P, J = m.jordan_form()
>>> J
Matrix([
[2, 1, 0, 0],
[0, 2, 0, 0],
[0, 0, 2, 1],
[0, 0, 0, 2]])
See Also
========

jordan_cells
"""
r"""Return a list of Jordan cells of current matrix. This list shape Jordan matrix J.
If calc_transformation is specified as False, then transformation P such that
`J = P^{-1} \cdot M \cdot P`
will not be calculated.
Notes =====
Calculation of transformation P is not implemented yet.
Examples
========

>>> from sympy import Matrix
>>> m = Matrix(4, 4, [
...      6,  5, -2, -3,
...     -3, -1,  3,  3,
...      2,  1, -2, -3,
...     -1,  1,  5,  5])

>>> P, Jcells = m.jordan_cells()
>>> Jcells[0]
Matrix([
[2, 1],
[0, 2]])
>>> Jcells[1]
Matrix([
[2, 1],
[0, 2]])
See Also
========

jordan_form
"""
# Order according to default_sort_key, this makes sure the order is the same as in .diagonalize():
"""Return a list of integers with sum equal to 'algebraical' and length equal to 'geometrical'""" n1 = algebraical // geometrical res = [n1]*geometrical res[len(res) - 1] += algebraical % geometrical assert sum(res) == algebraical return res
"""Test whether any subexpression matches any of the patterns.
Examples
========

>>> from sympy import Matrix, Float
>>> from sympy.abc import x, y
>>> A = Matrix(((1, x), (0.2, 3)))
>>> A.has(x)
True
>>> A.has(y)
False
>>> A.has(Float)
True
"""
"""Returns the dual of a matrix, which is:
`(1/2)*levicivita(i, j, k, l)*M(k, l)` summed over indices `k` and `l`
Since the levicivita method is anti_symmetric for any pairwise exchange of indices, the dual of a symmetric matrix is the zero matrix. Strictly speaking the dual defined here assumes that the 'matrix' `M` is a contravariant anti_symmetric second rank tensor, so that the dual is a covariant second rank tensor.
"""
def hstack(cls, *args):
    """Return a matrix formed by joining args horizontally (i.e.
    by repeated application of row_join).

Examples
========

>>> from sympy.matrices import Matrix, eye
>>> Matrix.hstack(eye(2), 2*eye(2))
Matrix([
[1, 0, 2, 0],
[0, 1, 0, 2]])
"""
def vstack(cls, *args):
    """Return a matrix formed by joining args vertically (i.e.
    by repeated application of col_join).

Examples
========

>>> from sympy.matrices import Matrix, eye
>>> Matrix.vstack(eye(2), 2*eye(2))
Matrix([
[1, 0],
[0, 1],
[2, 0],
[0, 2]])
"""
"""Concatenates two matrices along self's last and rhs's first column
Examples
========

>>> from sympy import zeros, ones
>>> M = zeros(3)
>>> V = ones(3, 1)
>>> M.row_join(V)
Matrix([
[0, 0, 0, 1],
[0, 0, 0, 1],
[0, 0, 0, 1]])

See Also
========

row
col_join
"""
    "`self` and `rhs` must have the same number of rows.")
"""Concatenates two matrices along self's last and bott's first row
Examples
========

>>> from sympy import zeros, ones
>>> M = zeros(3)
>>> V = ones(1, 3)
>>> M.col_join(V)
Matrix([
[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[1, 1, 1]])

See Also
========

col
row_join
"""
    "`self` and `bott` must have the same number of columns.")
"""Insert one or more rows at the given row position.
Examples
========

>>> from sympy import zeros, ones
>>> M = zeros(3)
>>> V = ones(1, 3)
>>> M.row_insert(1, V)
Matrix([
[0, 0, 0],
[1, 1, 1],
[0, 0, 0],
[0, 0, 0]])

See Also
========

row
col_insert
"""
    "`self` and `mti` must have the same number of columns.")
"""Insert one or more columns at the given column position.
Examples
========

>>> from sympy import zeros, ones
>>> M = zeros(3)
>>> V = ones(3, 1)
>>> M.col_insert(1, V)
Matrix([
[0, 1, 0, 0],
[0, 1, 0, 0],
[0, 1, 0, 0]])

See Also
========

col
row_insert
"""
"""Replaces Function F in Matrix entries with Function G.
Examples
========

>>> from sympy import symbols, Function, Matrix
>>> F, G = symbols('F, G', cls=Function)
>>> M = Matrix(2, 2, lambda i, j: F(i+j)); M
Matrix([
[F(0), F(1)],
[F(1), F(2)]])
>>> N = M.replace(F, G)
>>> N
Matrix([
[G(0), G(1)],
[G(1), G(2)]])
"""
"""Calculate the Moore-Penrose pseudoinverse of the matrix.
The Moore-Penrose pseudoinverse exists and is unique for any matrix. If the matrix is invertible, the pseudoinverse is the same as the inverse.
Examples
========

>>> from sympy import Matrix
>>> Matrix([[1, 2, 3], [4, 5, 6]]).pinv()
Matrix([
[-17/18,  4/9],
[  -1/9,  1/9],
[ 13/18, -2/9]])
See Also
========

inv
pinv_solve
References ==========
.. [1] https://en.wikipedia.org/wiki/Moore-Penrose_pseudoinverse
""" # Trivial case: pseudoinverse of all-zero matrix is its transpose. return AH else: # Matrix is not full rank, so A*AH cannot be inverted. raise NotImplementedError('Rank-deficient matrices are not yet ' 'supported.')
"""Solve Ax = B using the Moore-Penrose pseudoinverse.
There may be zero, one, or infinite solutions. If one solution exists, it will be returned. If infinite solutions exist, one will be returned based on the value of arbitrary_matrix. If no solutions exist, the least-squares solution is returned.
Parameters ==========
B : Matrix
    The right hand side of the equation to be solved for. Must have
    the same number of rows as matrix A.
arbitrary_matrix : Matrix
    If the system is underdetermined (e.g. A has more columns than
    rows), infinite solutions are possible, in terms of an arbitrary
    matrix. This parameter may be set to a specific matrix to use for
    that purpose; if so, it must be the same shape as x, with as many
    rows as matrix A has columns, and as many columns as matrix B.
    If left as None, an appropriate matrix containing dummy symbols in
    the form of ``wn_m`` will be used, with n and m being row and
    column position of each symbol.
Returns =======
x : Matrix
    The matrix that will satisfy Ax = B. Will have as many rows as
    matrix A has columns, and as many columns as matrix B.
Examples
========

>>> from sympy import Matrix
>>> A = Matrix([[1, 2, 3], [4, 5, 6]])
>>> B = Matrix([7, 8])
>>> A.pinv_solve(B)
Matrix([
[ _w0_0/6 - _w1_0/3 + _w2_0/6 - 55/18],
[-_w0_0/3 + 2*_w1_0/3 - _w2_0/3 + 1/9],
[ _w0_0/6 - _w1_0/3 + _w2_0/6 + 59/18]])
>>> A.pinv_solve(B, arbitrary_matrix=Matrix([0, 0, 0]))
Matrix([
[-55/18],
[   1/9],
[ 59/18]])
See Also
========

lower_triangular_solve
upper_triangular_solve
gauss_jordan_solve
cholesky_solve
diagonal_solve
LDLsolve
LUsolve
QRsolve
pinv
Notes =====
This may return either exact solutions or least squares solutions. To determine which, check ``A * A.pinv() * B == B``. It will be True if exact solutions exist, and False if only a least-squares solution exists. Be aware that the left hand side of that equation may need to be simplified to correctly compare to the right hand side.
References ==========
.. [1] https://en.wikipedia.org/wiki/Moore-Penrose_pseudoinverse#Obtaining_all_solutions_of_a_linear_system
"""
""" Solves Ax = b using Gauss Jordan elimination.
There may be zero, one, or infinite solutions. If one solution exists,
it will be returned. If infinite solutions exist, they will be returned
parametrically. If no solutions exist, it will raise a ValueError.
Parameters ==========
b : Matrix
    The right hand side of the equation to be solved for. Must have
    the same number of rows as matrix A.
freevar : List
    If the system is underdetermined (e.g. A has more columns than
    rows), infinite solutions are possible, in terms of arbitrary
    values of the free variables. Then the indices of the free
    variables in the solutions (column Matrix) will be returned by
    freevar, if the flag `freevar` is set to `True`.
Returns =======
x : Matrix
    The matrix that will satisfy Ax = B. Will have as many rows as
    matrix A has columns, and as many columns as matrix B.
params : Matrix
    If the system is underdetermined (e.g. A has more columns than
    rows), infinite solutions are possible, in terms of arbitrary
    parameters. These arbitrary parameters are returned as the params
    Matrix.
Examples
========

>>> from sympy import Matrix
>>> A = Matrix([[1, 2, 1, 1], [1, 2, 2, -1], [2, 4, 0, 6]])
>>> b = Matrix([7, 12, 4])
>>> sol, params = A.gauss_jordan_solve(b)
>>> sol
Matrix([
[-2*_tau0 - 3*_tau1 + 2],
[                 _tau0],
[           2*_tau1 + 5],
[                 _tau1]])
>>> params
Matrix([
[_tau0],
[_tau1]])

>>> A = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 10]])
>>> b = Matrix([3, 6, 9])
>>> sol, params = A.gauss_jordan_solve(b)
>>> sol
Matrix([
[-1],
[ 2],
[ 0]])
>>> params
Matrix(0, 1, [])
See Also
========

lower_triangular_solve
upper_triangular_solve
cholesky_solve
diagonal_solve
LDLsolve
LUsolve
QRsolve
pinv
References ==========
.. [1] http://en.wikipedia.org/wiki/Gaussian_elimination
"""
# solve by reduced row echelon form
# Bring to block form
# check for existence of solutions
# rank of aug Matrix should be equal to rank of coefficient matrix
# Get index of free symbols (free parameter)
# Free parameters
# Full parametric solution
# Undo permutation
else:
""" Get the type of the result when combining matrices of different types.
Currently the strategy is that immutability is contagious.
Examples
========

>>> from sympy import Matrix, ImmutableMatrix
>>> from sympy.matrices.matrices import classof
>>> M = Matrix([[1, 2], [3, 4]])  # a Mutable Matrix
>>> IM = ImmutableMatrix([[1, 2], [3, 4]])
>>> classof(M, IM)
<class 'sympy.matrices.immutable.ImmutableMatrix'>
"""
else:
    return B.__class__
return A.__class__
except Exception:
    pass
"""Return integer after making positive and validating against n.""" |