Tensor
class paddle.Tensor

Tensor is the most fundamental data structure in Paddle. There are several different ways to create a Tensor:

- Create a Tensor from pre-existing data; refer to to_tensor.
- Create a Tensor with the same shape and dtype as another Tensor; refer to ones_like, zeros_like, full_like.
dtype

The data type of the Tensor. Supported types: 'bool', 'float16', 'float32', 'float64', 'uint8', 'int8', 'int16', 'int32', 'int64'.

Code example

import paddle

x = paddle.to_tensor([1.0, 2.0, 3.0])
print("tensor's dtype is: {}".format(x.dtype))
grad

The gradient of the Tensor, as a numpy.ndarray.

Code example

import paddle

x = paddle.to_tensor([1.0, 2.0, 3.0], stop_gradient=False)
y = paddle.to_tensor([4.0, 5.0, 6.0], stop_gradient=False)
z = x * y
z.backward()
print("tensor's grad is: {}".format(x.grad))
name

The name of the Tensor, a Python string that serves as its unique identifier.

Code example

import paddle

print("Tensor name: ", paddle.to_tensor(1).name)
# Tensor name: generated_tensor_0
ndim

The number of dimensions of the Tensor, also known as its rank.

Code example

import paddle

print("Tensor's number of dimensions: ", paddle.to_tensor([[1, 2], [3, 4]]).ndim)
# Tensor's number of dimensions: 2
persistable

Whether the Tensor is persistable. When True, the Tensor is a persistent variable, which is not deleted after each iteration. Tensors such as model parameters and learning rates are persistent variables.

Code example

import paddle

print("Whether Tensor is persistable: ", paddle.to_tensor(1).persistable)
# Whether Tensor is persistable: False
place

The device placement of the Tensor. Three placements are possible: CPU, GPU, and pinned memory. Pinned memory, also known as page-locked or non-pageable memory, offers higher read/write throughput to and from the GPU and supports asynchronous transfer, which can further improve overall performance. Its drawback is that allocating too much of it can degrade host performance, since it reduces the pageable memory available for storing virtual-memory data.

Code example

import paddle

cpu_tensor = paddle.to_tensor(1, place=paddle.CPUPlace())
print(cpu_tensor.place)
shape

The shape of the Tensor. Shape is an important concept: it describes the number of elements along each dimension.

Code example

import paddle

print("Tensor's shape: ", paddle.to_tensor([[1, 2], [3, 4]]).shape)
# Tensor's shape: [2, 2]
stop_gradient

Whether the Tensor computes and propagates gradients. If stop_gradient is True, no gradient is computed for this Tensor and autograd propagation is blocked through it; otherwise, gradients are computed and propagated. User-created Tensors default to True, while model parameters have stop_gradient set to False.

Code example

import paddle

print("Tensor's stop_gradient: ", paddle.to_tensor([[1, 2], [3, 4]]).stop_gradient)
# Tensor's stop_gradient: True
abs(name=None)
Returns: the resulting Tensor
Return type: Tensor
Refer to abs

acos(name=None)
Returns: the resulting Tensor
Return type: Tensor
Refer to acos

add(y, name=None)
Returns: the resulting Tensor
Return type: Tensor
Refer to add

add_n(inputs, name=None)
Returns: the resulting Tensor
Return type: Tensor
Refer to add_n

addmm(x, y, beta=1.0, alpha=1.0, name=None)
Returns: the resulting Tensor
Return type: Tensor
Refer to addmm

allclose(y, rtol=1e-05, atol=1e-08, equal_nan=False, name=None)
Returns: the resulting Tensor
Return type: Tensor
Refer to allclose
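allclose checks whether two Tensors are element-wise equal within the rtol/atol tolerances and returns a single boolean Tensor. A minimal usage sketch, assuming the paddle 2.x dynamic-graph API (input values here are illustrative):

import paddle

x = paddle.to_tensor([1.0, 2.0])
y = paddle.to_tensor([1.0, 2.0 + 1e-09])  # within the default rtol=1e-05, atol=1e-08
z = paddle.to_tensor([1.0, 2.5])

print(x.allclose(y))  # expected: a bool Tensor holding True
print(x.allclose(z))  # expected: a bool Tensor holding False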
argmax(axis=None, keepdim=False, dtype='int64', name=None)
Returns: the resulting Tensor
Return type: Tensor
Refer to argmax
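As a usage sketch (illustrative values, assuming the paddle 2.x dynamic-graph API): with axis=None the Tensor is flattened before taking the argmax, while an explicit axis reduces along that axis.

import paddle

x = paddle.to_tensor([[5, 8, 1],
                      [9, 2, 7]])
print(x.argmax())        # expected: 3, the index of 9 in the flattened tensor
print(x.argmax(axis=1))  # expected: [1, 0], the per-row maxima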
argmin(axis=None, keepdim=False, dtype='int64', name=None)
Returns: the resulting Tensor
Return type: Tensor
Refer to argmin

argsort(axis=-1, descending=False, name=None)
Returns: the resulting Tensor
Return type: Tensor
Refer to argsort

asin(name=None)
Returns: the resulting Tensor
Return type: Tensor
Refer to asin

astype(dtype)
Converts the Tensor to the given dtype and returns the result as a new Tensor.
Parameters:
- dtype (str) - the target dtype; supports 'bool', 'float16', 'float32', 'float64', 'int8', 'int16', 'int32', 'int64', 'uint8'.
Returns: a new Tensor with the converted type
Return type: Tensor

Code example

import paddle

x = paddle.to_tensor(1.0)
print("original tensor's dtype is: {}".format(x.dtype))
print("new tensor's dtype is: {}".format(x.astype('float64').dtype))
atan(name=None)
Returns: the resulting Tensor
Return type: Tensor
Refer to atan

backward(retain_graph=False)
Runs backpropagation starting from the current Tensor, computing the gradients of the Tensors in the computation graph.
Parameters:
- retain_graph (bool, optional) - if False, the backward graph is freed after the call. Set it to True if you need to add further ops after backward() and keep the existing backward graph. Setting it to False saves memory. Default: False.
Returns: None

Code example

import paddle
import numpy as np

x = np.ones([2, 2], np.float32)
inputs = []
for _ in range(10):
    tmp = paddle.to_tensor(x)
    # if tmp's stop_gradient is not set to False, no path to the loss
    # will carry a gradient, since no tensor on it requires one
    tmp.stop_gradient = False
    inputs.append(tmp)
ret = paddle.add_n(inputs)
loss = paddle.sum(ret)
loss.backward()
bmm(y, name=None)
Returns: the resulting Tensor
Return type: Tensor
Refer to bmm

broadcast_to(shape, name=None)
Returns: the resulting Tensor
Return type: Tensor
Refer to expand; the two APIs are functionally identical

cast(dtype)
Returns: the resulting Tensor
Return type: Tensor
Refer to cast

ceil(name=None)
Returns: the resulting Tensor
Return type: Tensor
Refer to ceil

cholesky(upper=False, name=None)
Returns: the resulting Tensor
Return type: Tensor
Refer to cholesky

chunk(chunks, axis=0, name=None)
Returns: the resulting Tensor
Return type: Tensor
Refer to chunk
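chunk splits the Tensor into the given number of pieces along axis, returned as a list of Tensors. A shape-level sketch, assuming the paddle 2.x dynamic-graph API (values are illustrative):

import paddle

x = paddle.arange(12).reshape([3, 4])
pieces = x.chunk(2, axis=1)          # two Tensors, each of shape [3, 2]
print(len(pieces), pieces[0].shape)  # expected: 2 [3, 2]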
clear_gradient()
Clears the gradient of the current Tensor.
Returns: None

Code example

import paddle
import numpy as np

x = np.ones([2, 2], np.float32)
inputs2 = []
for _ in range(10):
    tmp = paddle.to_tensor(x)
    tmp.stop_gradient = False
    inputs2.append(tmp)
ret2 = paddle.add_n(inputs2)
loss2 = paddle.sum(ret2)
loss2.backward()
print(loss2.gradient())
loss2.clear_gradient()
print("After clear {}".format(loss2.gradient()))

clip(min=None, max=None, name=None)
Returns: the resulting Tensor
Return type: Tensor
Refer to clip
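clip limits every element to the range [min, max]. A one-line sketch with illustrative values, assuming the paddle 2.x dynamic-graph API:

import paddle

x = paddle.to_tensor([0.5, 1.5, 2.5, 3.5])
print(x.clip(min=1.0, max=3.0))  # expected: [1.0, 1.5, 2.5, 3.0]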
clone()
Clones the current Tensor. The clone stays in the original computation graph and participates in gradient propagation.
Returns: the cloned Tensor

Code example

import paddle

x = paddle.to_tensor(1.0, stop_gradient=False)
clone_x = x.clone()
y = clone_x**2
y.backward()
print(clone_x.stop_gradient)  # False
print(clone_x.grad)           # [2.0], supports gradient propagation
print(x.stop_gradient)        # False
print(x.grad)                 # [2.0], clone_x propagates gradients back to x

x = paddle.to_tensor(1.0)
clone_x = x.clone()
clone_x.stop_gradient = False
z = clone_x**3
z.backward()
print(clone_x.stop_gradient)  # False
print(clone_x.grad)           # [3.0], supports gradient propagation
print(x.stop_gradient)        # True
print(x.grad)                 # None

concat(axis=0, name=None)
Returns: the resulting Tensor
Return type: Tensor
Refer to concat

cos(name=None)
Returns: the resulting Tensor
Return type: Tensor
Refer to cos

cosh(name=None)
Computes the hyperbolic cosine of each element of the Tensor.
Return type: Tensor
Refer to cosh

Code example

import paddle

x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
out = paddle.cosh(x)
print(out)
# [1.08107237 1.02006674 1.00500417 1.04533851]

cpu()
Copies the current Tensor to the CPU; the returned Tensor is detached from the original computation graph.
If the Tensor is already on the CPU, no copy is performed.
Returns: the Tensor copied to the CPU

Code example

import paddle

x = paddle.to_tensor(1.0, place=paddle.CUDAPlace(0))
print(x.place)  # CUDAPlace(0)
y = x.cpu()
print(y.place)  # CPUPlace

cross(y, axis=None, name=None)
Returns: the resulting Tensor
Return type: Tensor
Refer to cross

cuda(device_id=None, blocking=False)
Copies the current Tensor to the GPU; the returned Tensor is detached from the original computation graph.
If the Tensor is already on the GPU and device_id is None, no copy is performed.
Parameters:
- device_id (int, optional) - the id of the target GPU device. Defaults to None, which means the current Tensor's device id, or 0 if the Tensor is not on a GPU.
- blocking (bool, optional) - if False and the current Tensor is in pinned memory, the host-to-device copy runs asynchronously; otherwise it is synchronous. Default: False.
Returns: the Tensor copied to the GPU

Code example

import paddle

x = paddle.to_tensor(1.0, place=paddle.CPUPlace())
print(x.place)  # CPUPlace
y = x.cuda()
print(y.place)  # CUDAPlace(0)
cumsum(axis=None, dtype=None, name=None)
Returns: the resulting Tensor
Return type: Tensor
Refer to cumsum
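cumsum accumulates along the given axis; with axis=None the Tensor is flattened first. A sketch with illustrative values, assuming the paddle 2.x dynamic-graph API:

import paddle

x = paddle.to_tensor([[1, 2, 3],
                      [4, 5, 6]])
print(x.cumsum(axis=0))  # expected: [[1, 2, 3], [5, 7, 9]]
print(x.cumsum())        # expected: [1, 3, 6, 10, 15, 21] over the flattened tensor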
detach()
Returns a new Tensor detached from the current computation graph.
Returns: the Tensor detached from the graph

Code example

import paddle
import numpy as np

data = np.random.uniform(-1, 1, [30, 10, 32]).astype('float32')
linear = paddle.nn.Linear(32, 64)
data = paddle.to_tensor(data)
x = linear(data)
y = x.detach()

dim()
Returns the number of dimensions of the Tensor, also known as its rank.

Code example

import paddle

print("Tensor's number of dimensions: ", paddle.to_tensor([[1, 2], [3, 4]]).dim())
# Tensor's number of dimensions: 2

dist(y, p=2)
Returns: the resulting Tensor
Return type: Tensor
Refer to dist

divide(y, name=None)
Returns: the resulting Tensor
Return type: Tensor
Refer to divide

dot(y, name=None)
Returns: the resulting Tensor
Return type: Tensor
Refer to dot

equal(y, name=None)
Returns: the resulting Tensor
Return type: Tensor
Refer to equal

equal_all(y, name=None)
Returns: the resulting Tensor
Return type: Tensor
Refer to equal_all

erf(name=None)
Returns: the resulting Tensor
Return type: Tensor
Refer to erf

exp(name=None)
Returns: the resulting Tensor
Return type: Tensor
Refer to exp

expand(shape, name=None)
Returns: the resulting Tensor
Return type: Tensor
Refer to expand
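expand broadcasts singleton dimensions of the Tensor up to the requested shape. A shape-level sketch with a hypothetical [3, 1] input, assuming the paddle 2.x dynamic-graph API:

import paddle

x = paddle.to_tensor([[1.0], [2.0], [3.0]])  # shape [3, 1]
y = x.expand([3, 4])                         # the singleton column is broadcast
print(y.shape)                               # expected: [3, 4]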
expand_as(y, name=None)
Returns: the resulting Tensor
Return type: Tensor
Refer to expand_as

flatten(start_axis=0, stop_axis=-1, name=None)
Returns: the resulting Tensor
Return type: Tensor
Refer to flatten

flip(axis, name=None)
Returns: the resulting Tensor
Return type: Tensor
Refer to flip

floor(name=None)
Returns: the resulting Tensor
Return type: Tensor
Refer to floor

floor_divide(y, name=None)
Returns: the resulting Tensor
Return type: Tensor
Refer to floor_divide

floor_mod(y, name=None)
Returns: the resulting Tensor
Return type: Tensor
Refer to mod

gather(index, axis=None, name=None)
Returns: the resulting Tensor
Return type: Tensor
Refer to gather
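gather selects entries along an axis (axis 0 when omitted) using an integer index Tensor. A sketch with illustrative values, assuming the paddle 2.x dynamic-graph API:

import paddle

x = paddle.to_tensor([[1, 2], [3, 4], [5, 6]])
index = paddle.to_tensor([0, 2])
print(x.gather(index))  # expected: [[1, 2], [5, 6]], rows 0 and 2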
gather_nd(index, name=None)
Returns: the resulting Tensor
Return type: Tensor
Refer to gather_nd

gradient()
Same as Tensor.grad: returns the gradient of the Tensor, as a numpy.ndarray.
Returns: the Tensor's gradient
Return type: numpy.ndarray

Code example

import paddle

x = paddle.to_tensor([1.0, 2.0, 3.0], stop_gradient=False)
y = paddle.to_tensor([4.0, 5.0, 6.0], stop_gradient=False)
z = x * y
z.backward()
print("tensor's grad is: {}".format(x.gradient()))

greater_equal(y, name=None)
Returns: the resulting Tensor
Return type: Tensor
Refer to greater_equal

greater_than(y, name=None)
Returns: the resulting Tensor
Return type: Tensor
Refer to greater_than

histogram(bins=100, min=0, max=0)
Returns: the resulting Tensor
Return type: Tensor
Refer to histogram

increment(value=1.0, in_place=True)
Returns: the resulting Tensor
Return type: Tensor
Refer to increment

index_sample(index)
Returns: the resulting Tensor
Return type: Tensor
Refer to index_sample

index_select(index, axis=0, name=None)
Returns: the resulting Tensor
Return type: Tensor
Refer to index_select

inverse(name=None)
Returns: the resulting Tensor
Return type: Tensor
Refer to inverse

is_empty(cond=None)
Returns: the resulting Tensor
Return type: Tensor
Refer to is_empty

isfinite(name=None)
Returns: the resulting Tensor
Return type: Tensor
Refer to isfinite

isinf(name=None)
Returns: the resulting Tensor
Return type: Tensor
Refer to isinf

isnan(name=None)
Returns: the resulting Tensor
Return type: Tensor
Refer to isnan

kron(y, name=None)
Returns: the resulting Tensor
Return type: Tensor
Refer to kron

less_equal(y, name=None)
Returns: the resulting Tensor
Return type: Tensor
Refer to less_equal

less_than(y, name=None)
Returns: the resulting Tensor
Return type: Tensor
Refer to less_than

log(name=None)
Returns: the resulting Tensor
Return type: Tensor
Refer to log

log1p(name=None)
Returns: the resulting Tensor
Return type: Tensor
Refer to log1p

logical_and(y, out=None, name=None)
Returns: the resulting Tensor
Return type: Tensor
Refer to logical_and

logical_not(out=None, name=None)
Returns: the resulting Tensor
Return type: Tensor
Refer to logical_not

logical_or(y, out=None, name=None)
Returns: the resulting Tensor
Return type: Tensor
Refer to logical_or

logical_xor(y, out=None, name=None)
Returns: the resulting Tensor
Return type: Tensor
Refer to logical_xor

logsigmoid()
Returns: the resulting Tensor
Return type: Tensor
Refer to logsigmoid

logsumexp(axis=None, keepdim=False, name=None)
Returns: the resulting Tensor
Return type: Tensor
Refer to logsumexp

masked_select(mask, name=None)
Returns: the resulting Tensor
Return type: Tensor
Refer to masked_select
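masked_select keeps the elements where a boolean mask of the same shape is True and returns them as a 1-D Tensor. A sketch with illustrative values, assuming the paddle 2.x dynamic-graph API:

import paddle

x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0]])
mask = x > 2.0
print(x.masked_select(mask))  # expected: a 1-D Tensor [3.0, 4.0]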
matmul(y, transpose_x=False, transpose_y=False, name=None)
Returns: the resulting Tensor
Return type: Tensor
Refer to matmul
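matmul performs matrix multiplication, optionally transposing either operand. A shape-level sketch with random inputs, assuming the paddle 2.x dynamic-graph API:

import paddle

x = paddle.rand([2, 3])
y = paddle.rand([3, 4])
print(x.matmul(y).shape)                    # expected: [2, 4]
print(x.matmul(x, transpose_y=True).shape)  # expected: [2, 2], i.e. x @ x^T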
max(axis=None, keepdim=False, name=None)
Returns: the resulting Tensor
Return type: Tensor
Refer to max

maximum(y, axis=-1, name=None)
Returns: the resulting Tensor
Return type: Tensor
Refer to maximum

mean(axis=None, keepdim=False, name=None)
Returns: the resulting Tensor
Return type: Tensor
Refer to mean

min(axis=None, keepdim=False, name=None)
Returns: the resulting Tensor
Return type: Tensor
Refer to min

minimum(y, axis=-1, name=None)
Returns: the resulting Tensor
Return type: Tensor
Refer to minimum

mm(mat2, name=None)
Returns: the resulting Tensor
Return type: Tensor
Refer to mm

mod(y, name=None)
Returns: the resulting Tensor
Return type: Tensor
Refer to mod

multiplex(index)
Returns: the resulting Tensor
Return type: Tensor
Refer to multiplex

multiply(y, axis=-1, name=None)
Returns: the resulting Tensor
Return type: Tensor
Refer to multiply

ndimension()
Returns the number of dimensions of the Tensor, also known as its rank.

Code example

import paddle

print("Tensor's number of dimensions: ", paddle.to_tensor([[1, 2], [3, 4]]).ndimension())
# Tensor's number of dimensions: 2

nonzero(as_tuple=False)
Returns: the resulting Tensor
Return type: Tensor
Refer to nonzero

norm(p='fro', axis=None, keepdim=False, name=None)
Returns: the resulting Tensor
Return type: Tensor
Refer to norm

not_equal(y, name=None)
Returns: the resulting Tensor
Return type: Tensor
Refer to not_equal

numel(name=None)
Returns: the resulting Tensor
Return type: Tensor
Refer to numel

numpy()
Converts the current Tensor to a numpy.ndarray.
Returns: the numpy.ndarray converted from the Tensor
Return type: numpy.ndarray

Code example

import paddle
import numpy as np

data = np.random.uniform(-1, 1, [30, 10, 32]).astype('float32')
linear = paddle.nn.Linear(32, 64)
data = paddle.to_tensor(data)
x = linear(data)
print(x.numpy())

pin_memory()
Copies the current Tensor to pinned memory; the returned Tensor is detached from the original computation graph.
If the Tensor is already in pinned memory, no copy is performed.
Returns: the Tensor copied to pinned memory

Code example

import paddle

x = paddle.to_tensor(1.0, place=paddle.CUDAPlace(0))
print(x.place)  # CUDAPlace(0)
y = x.pin_memory()
print(y.place)  # CUDAPinnedPlace

pow(y, name=None)
Returns: the resulting Tensor
Return type: Tensor
Refer to pow

prod(axis=None, keepdim=False, dtype=None, name=None)
Returns: the resulting Tensor
Return type: Tensor
Refer to prod

rank()
Returns: the resulting Tensor
Return type: Tensor
Refer to rank

reciprocal(name=None)
Returns: the resulting Tensor
Return type: Tensor
Refer to reciprocal

remainder(y, name=None)
Returns: the resulting Tensor
Return type: Tensor
Refer to mod

reshape(shape, name=None)
Returns: the resulting Tensor
Return type: Tensor
Refer to reshape
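reshape returns a Tensor with a new shape whose element count matches the original; one dimension may be -1 and is then inferred. A sketch, assuming the paddle 2.x dynamic-graph API:

import paddle

x = paddle.arange(6)
print(x.reshape([2, 3]).shape)   # expected: [2, 3]
print(x.reshape([-1, 2]).shape)  # expected: [3, 2]; the -1 is inferred as 3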
reverse(axis, name=None)
Returns: the resulting Tensor
Return type: Tensor
Refer to reverse

roll(shifts, axis=None, name=None)
Returns: the resulting Tensor
Return type: Tensor
Refer to roll

round(name=None)
Returns: the resulting Tensor
Return type: Tensor
Refer to round

rsqrt(name=None)
Returns: the resulting Tensor
Return type: Tensor
Refer to rsqrt

scale(scale=1.0, bias=0.0, bias_after_scale=True, act=None, name=None)
Returns: the resulting Tensor
Return type: Tensor
Refer to scale

scatter(index, updates, overwrite=True, name=None)
Returns: the resulting Tensor
Return type: Tensor
Refer to scatter

scatter_nd(updates, shape, name=None)
Returns: the resulting Tensor
Return type: Tensor
Refer to scatter_nd

scatter_nd_add(index, updates, name=None)
Returns: the resulting Tensor
Return type: Tensor
Refer to scatter_nd_add

set_value(value)
Sets the value of the current Tensor.
Parameters:
- value (Tensor|np.ndarray) - the value to set, either a Tensor or a numpy.ndarray.

Code example

import paddle
import numpy as np

data = np.ones([3, 1024], dtype='float32')
linear = paddle.nn.Linear(1024, 4)
input = paddle.to_tensor(data)
linear(input)  # call with the default weight

custom_weight = np.random.randn(1024, 4).astype("float32")
linear.weight.set_value(custom_weight)  # replace the existing weight
out = linear(input)  # call with the new weight

shard_index(index_num, nshards, shard_id, ignore_value=-1)
Returns: the resulting Tensor
Return type: Tensor
Refer to shard_index

sign(name=None)
Returns: the resulting Tensor
Return type: Tensor
Refer to sign

sin(name=None)
Returns: the resulting Tensor
Return type: Tensor
Refer to sin

sinh(name=None)
Computes the hyperbolic sine of each element of the Tensor.

Code example

import paddle

x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
out = x.sinh()
print(out)
# [-0.41075233 -0.201336 0.10016675 0.30452029]

size()
Returns: the resulting Tensor
Return type: Tensor
Refer to size

slice(axes, starts, ends)
Returns: the resulting Tensor
Return type: Tensor
Refer to slice
sort(axis=-1, descending=False, name=None)
Returns: the resulting Tensor
Return type: Tensor
Refer to sort

split(num_or_sections, axis=0, name=None)
Returns: the resulting Tensor
Return type: Tensor
Refer to split

sqrt(name=None)
Returns: the resulting Tensor
Return type: Tensor
Refer to sqrt

square(name=None)
Returns: the resulting Tensor
Return type: Tensor
Refer to square

squeeze(axis=None, name=None)
Returns: the resulting Tensor
Return type: Tensor
Refer to squeeze

stack(axis=0, name=None)
Returns: the resulting Tensor
Return type: Tensor
Refer to stack

stanh(scale_a=0.67, scale_b=1.7159, name=None)
Returns: the resulting Tensor
Return type: Tensor
Refer to stanh

std(axis=None, unbiased=True, keepdim=False, name=None)
Returns: the resulting Tensor
Return type: Tensor
Refer to std

strided_slice(axes, starts, ends, strides)
Returns: the resulting Tensor
Return type: Tensor
Refer to strided_slice

sum(axis=None, dtype=None, keepdim=False, name=None)
Returns: the resulting Tensor
Return type: Tensor
Refer to sum

sums(out=None)
Returns: the resulting Tensor
Return type: Tensor
Refer to sums

t(name=None)
Returns: the resulting Tensor
Return type: Tensor
Refer to t

tanh(name=None)
Returns: the resulting Tensor
Return type: Tensor
Refer to tanh

tile(repeat_times, name=None)
Returns: the resulting Tensor
Return type: Tensor
Refer to tile

topk(k, axis=None, largest=True, sorted=True, name=None)
Returns: the resulting Tensor
Return type: Tensor
Refer to topk
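topk yields both the k largest values (or smallest, with largest=False) and their indices, so it returns a pair of Tensors. A sketch with illustrative values, assuming the paddle 2.x dynamic-graph API:

import paddle

x = paddle.to_tensor([1.0, 4.0, 5.0, 7.0])
values, indices = x.topk(k=2)
print(values)   # expected: [7.0, 5.0]
print(indices)  # expected: [3, 2]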
trace(offset=0, axis1=0, axis2=1, name=None)
Returns: the resulting Tensor
Return type: Tensor
Refer to trace

transpose(perm, name=None)
Returns: the resulting Tensor
Return type: Tensor
Refer to transpose
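transpose permutes the Tensor's dimensions according to perm. A shape-level sketch with a random input, assuming the paddle 2.x dynamic-graph API:

import paddle

x = paddle.rand([2, 3, 4])
print(x.transpose([2, 0, 1]).shape)  # expected: [4, 2, 3]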
unbind(axis=0)
Returns: the resulting Tensor
Return type: Tensor
Refer to unbind

unique(return_index=False, return_inverse=False, return_counts=False, axis=None, dtype='int64', name=None)
Returns: the resulting Tensor
Return type: Tensor
Refer to unique

unique_with_counts(dtype='int32')
Returns: the resulting Tensor
Return type: Tensor
Refer to unique_with_counts

unsqueeze(axis, name=None)
Returns: the resulting Tensor
Return type: Tensor
Refer to unsqueeze
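unsqueeze inserts size-1 dimensions at the given axis or axes. A shape-level sketch, assuming the paddle 2.x dynamic-graph API:

import paddle

x = paddle.rand([3, 4])
print(x.unsqueeze(0).shape)       # expected: [1, 3, 4]
print(x.unsqueeze([0, 2]).shape)  # expected: [1, 3, 1, 4]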
unstack(axis=0, num=None)
Returns: the resulting Tensor
Return type: Tensor
Refer to unstack

var(axis=None, unbiased=True, keepdim=False, name=None)
Returns: the resulting Tensor
Return type: Tensor
Refer to var

where(x, y, name=None)
Returns: the resulting Tensor
Return type: Tensor
Refer to where