MLCTensor.CreateByQuantizing Method

Definition

Namespace: MLCompute

Overloads

CreateByQuantizing(MLCDataType, Single, IntPtr)
Creates a tensor by quantizing this tensor to the given data type using a scalar scale and bias.

CreateByQuantizing(MLCDataType, MLCTensor, MLCTensor, IntPtr)
Creates a tensor by quantizing this tensor to the given data type using per-axis scale and bias tensors along the given axis.

CreateByQuantizing(MLCDataType, Single, IntPtr)

Creates a tensor by quantizing this tensor to the given data type using a scalar scale and bias.

[Foundation.Export("tensorByQuantizingToType:scale:bias:")]
[ObjCRuntime.BindingImpl(ObjCRuntime.BindingImplOptions.GeneratedCode | ObjCRuntime.BindingImplOptions.Optimizable)]
public virtual MLCompute.MLCTensor? CreateByQuantizing(MLCompute.MLCDataType type, float scale, IntPtr bias);
[<Foundation.Export("tensorByQuantizingToType:scale:bias:")>]
[<ObjCRuntime.BindingImpl(ObjCRuntime.BindingImplOptions.GeneratedCode | ObjCRuntime.BindingImplOptions.Optimizable)>]
abstract member CreateByQuantizing : MLCompute.MLCDataType * single * nativeint -> MLCompute.MLCTensor
override this.CreateByQuantizing : MLCompute.MLCDataType * single * nativeint -> MLCompute.MLCTensor

Parameters

type
MLCDataType
The data type of the quantized tensor.

scale
Single
The scale to use for quantization.

bias
IntPtr (nativeint)
The bias to use for quantization.

Returns

MLCTensor

The quantized tensor.

Attributes

ExportAttribute, BindingImplAttribute
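
Examples

A minimal sketch of calling this overload. It assumes an existing MLCTensor named sourceTensor and that MLCDataType.Int8 is an available target type; neither is defined on this page, and the scale and bias values are placeholders.

// Minimal sketch; sourceTensor is assumed to be an MLCTensor created elsewhere.
MLCompute.MLCTensor? quantized = sourceTensor.CreateByQuantizing(
    MLCompute.MLCDataType.Int8, // target data type for the quantized tensor (assumed enum member)
    0.5f,                       // scalar quantization scale
    IntPtr.Zero);               // quantization bias, passed as a native integer in this binding

if (quantized is null)
{
    // Handle the case where no tensor was returned.
}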

Applies to

CreateByQuantizing(MLCDataType, MLCTensor, MLCTensor, IntPtr)

Creates a tensor by quantizing this tensor to the given data type using per-axis scale and bias tensors along the given axis.

[Foundation.Export("tensorByQuantizingToType:scale:bias:axis:")]
[ObjCRuntime.BindingImpl(ObjCRuntime.BindingImplOptions.GeneratedCode | ObjCRuntime.BindingImplOptions.Optimizable)]
public virtual MLCompute.MLCTensor? CreateByQuantizing(MLCompute.MLCDataType type, MLCompute.MLCTensor scale, MLCompute.MLCTensor bias, IntPtr axis);
[<Foundation.Export("tensorByQuantizingToType:scale:bias:axis:")>]
[<ObjCRuntime.BindingImpl(ObjCRuntime.BindingImplOptions.GeneratedCode | ObjCRuntime.BindingImplOptions.Optimizable)>]
abstract member CreateByQuantizing : MLCompute.MLCDataType * MLCompute.MLCTensor * MLCompute.MLCTensor * nativeint -> MLCompute.MLCTensor
override this.CreateByQuantizing : MLCompute.MLCDataType * MLCompute.MLCTensor * MLCompute.MLCTensor * nativeint -> MLCompute.MLCTensor

Parameters

type
MLCDataType
The data type of the quantized tensor.

scale
MLCTensor
A tensor of per-axis quantization scales.

bias
MLCTensor
A tensor of per-axis quantization biases.

axis
IntPtr (nativeint)
The axis along which the scale and bias tensors apply.

Returns

MLCTensor

The quantized tensor.

Attributes

ExportAttribute, BindingImplAttribute
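
Examples

A minimal sketch of calling the per-axis overload. It assumes existing MLCTensor instances named sourceTensor, scaleTensor, and biasTensor, and that MLCDataType.Int8 is an available target type; none of these are defined on this page, and the axis value is a placeholder.

// Minimal sketch; scaleTensor and biasTensor are assumed to hold one scale and
// one bias per slice along the chosen axis.
MLCompute.MLCTensor? quantized = sourceTensor.CreateByQuantizing(
    MLCompute.MLCDataType.Int8, // target data type for the quantized tensor (assumed enum member)
    scaleTensor,                // per-axis quantization scales
    biasTensor,                 // per-axis quantization biases
    (IntPtr)1);                 // axis along which the scales and biases apply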

Applies to