Skip to content

Commit

Permalink
Sync from upstream TF.
Browse files Browse the repository at this point in the history
  • Loading branch information
TFLM-bot committed Feb 17, 2025
1 parent ef64591 commit 388477e
Show file tree
Hide file tree
Showing 3 changed files with 119 additions and 3 deletions.
2 changes: 1 addition & 1 deletion tensorflow/lite/core/c/c_api_types.h
Original file line number Diff line number Diff line change
Expand Up @@ -61,7 +61,7 @@ extern "C" {
#ifdef TFL_COMPILE_LIBRARY
#define TFL_CAPI_EXPORT __declspec(dllexport)
#else
#define TFL_CAPI_EXPORT __declspec(dllimport)
#define TFL_CAPI_EXPORT
#endif // TFL_COMPILE_LIBRARY
#else
#define TFL_CAPI_EXPORT __attribute__((visibility("default")))
Expand Down
113 changes: 111 additions & 2 deletions tensorflow/lite/core/c/common.cc
Original file line number Diff line number Diff line change
Expand Up @@ -102,6 +102,56 @@ void TfLiteVarArrayFree(T* a) {
free(a);
}

// Returns a deep copy of the given quantization descriptor.
//
// For `kTfLiteAffineQuantization` the scale and zero-point arrays are
// duplicated so the clone owns its own parameter storage and can be freed
// independently of `src`. For `kTfLiteNoQuantization` there is nothing to
// copy and `params` is null.
TfLiteQuantization TfLiteQuantizationClone(const TfLiteQuantization& src) {
  TfLiteQuantization dst;
  dst.type = src.type;
  // BUG FIX: `dst` is a non-zero-initialized local, so `dst.params` must be
  // set explicitly. Previously it was left indeterminate for
  // `kTfLiteNoQuantization`, which is undefined behavior if the caller ever
  // inspects or frees it.
  dst.params = nullptr;
  switch (src.type) {
    case kTfLiteNoQuantization:
      break;
    case kTfLiteAffineQuantization: {
      dst.params = calloc(1, sizeof(TfLiteAffineQuantization));
      const TfLiteAffineQuantization* const src_params =
          (TfLiteAffineQuantization*)(src.params);
      TfLiteAffineQuantization* const dst_params =
          (TfLiteAffineQuantization*)(dst.params);
      dst_params->quantized_dimension = src_params->quantized_dimension;
      // Deep-copy the per-channel arrays; the clone must not alias `src`.
      dst_params->scale = TfLiteFloatArrayCopy(src_params->scale);
      dst_params->zero_point = TfLiteIntArrayCopy(src_params->zero_point);
      break;
    }
  }
  return dst;
}

// Deep-copies a sparsity descriptor.
//
// Every array owned by `src` (traversal order, block map and each
// dimension's metadata arrays) is duplicated so the returned value can be
// freed independently of the original.
TfLiteSparsity TfLiteSparsityClone(const TfLiteSparsity& src) {
  TfLiteSparsity cloned = src;
  cloned.traversal_order = TfLiteIntArrayCopy(src.traversal_order);
  cloned.block_map = TfLiteIntArrayCopy(src.block_map);
  if (!src.dim_metadata) {
    return cloned;
  }
  // Allocate a zeroed metadata table, then deep-copy each entry.
  cloned.dim_metadata = reinterpret_cast<TfLiteDimensionMetadata*>(
      calloc(src.dim_metadata_size, sizeof(TfLiteDimensionMetadata)));
  for (int d = 0; d < src.dim_metadata_size; ++d) {
    const TfLiteDimensionMetadata& src_dim = src.dim_metadata[d];
    TfLiteDimensionMetadata& dst_dim = cloned.dim_metadata[d];
    dst_dim = src_dim;
    dst_dim.array_segments = TfLiteIntArrayCopy(src_dim.array_segments);
    dst_dim.array_indices = TfLiteIntArrayCopy(src_dim.array_indices);
  }
  return cloned;
}

// Clones the source sparsity to a newly allocated object.
//
// Returns nullptr when `src` is null; otherwise the caller owns the
// returned heap object and its deep-copied arrays.
TfLiteSparsity* TfLiteSparsityClone(const TfLiteSparsity* const src) {
  if (src == nullptr) {
    return nullptr;
  }
  auto* const cloned =
      reinterpret_cast<TfLiteSparsity*>(calloc(1, sizeof(TfLiteSparsity)));
  // Delegate the member-wise deep copy to the by-value overload.
  *cloned = TfLiteSparsityClone(*src);
  return cloned;
}

} // namespace

extern "C" {
Expand Down Expand Up @@ -234,6 +284,55 @@ void TfLiteTensorFree(TfLiteTensor* t) {
t->sparsity = nullptr;
}

// Returns a tensor holding a deep copy of `src`.
//
// The data buffer is cloned only when the tensor's allocation strategy shows
// that the tensor owns it; otherwise the pointer is shared with `src`.
TfLiteTensor TfLiteTensorClone(const TfLiteTensor src) {
  // We copy all of the source data first, then we clone the fields that can't
  // be shared between two tensor instances.
  TfLiteTensor dst = src;
  // Data that is owned by the original tensor must be cloned. Check
  // TfLiteTensorFree to find out which members are owned.
  if (src.data.data) {
    const TfLiteAllocationStrategy allocation_strategy =
        TfLiteTensorGetAllocationStrategy(&src);
    switch (allocation_strategy) {
      case kTfLiteAllocationStrategyUnknown:
        // We don't know the allocation strategy, which means that the tensor
        // doesn't own its data: we keep the copied pointer to the data.
        break;
      case kTfLiteAllocationStrategyNone:
        break;
      case kTfLiteAllocationStrategyMMap:
        // Mmapped data is read-only and external to the interpreter. We keep
        // the copied pointer to the data.
        break;
      case kTfLiteAllocationStrategyArena:
        // Arena tensors are allocated when the graph is prepared. There is no
        // data associated to such a tensor between runs so we don't care about
        // the value of `data`.
        break;
      case kTfLiteAllocationStrategyMalloc:
        // Owned heap buffer: allocate a fresh one and copy the bytes.
        dst.data.data = malloc(src.bytes);
        std::memcpy(dst.data.data, src.data.data, src.bytes);
        break;
      case kTfLiteAllocationStrategyNew:
        // Special case for variant objects. They are allocated using new/delete
        // but require using the `CloneTo` function.
        if (src.allocation_type == kTfLiteVariantObject) {
          dst.data.data = reinterpret_cast<const VariantData*>(src.data.data)
                              ->CloneTo(nullptr);
        } else {
          dst.data.data = new char[src.bytes];
          std::memcpy(dst.data.data, src.data.data, src.bytes);
        }
        break;
    }
  }
  // These members are owned by the tensor (see TfLiteTensorFree), so the
  // clone gets its own deep copies rather than aliasing `src`.
  dst.dims = TfLiteIntArrayCopy(src.dims);
  dst.dims_signature = TfLiteIntArrayCopy(src.dims_signature);
  dst.quantization = TfLiteQuantizationClone(src.quantization);
  dst.sparsity = TfLiteSparsityClone(src.sparsity);
  return dst;
}

void TfLiteTensorReset(TfLiteType type, const char* name, TfLiteIntArray* dims,
TfLiteQuantizationParams quantization, char* buffer,
size_t size, TfLiteAllocationType allocation_type,
Expand Down Expand Up @@ -399,11 +498,13 @@ TfLiteAllocationStrategy TfLiteTensorGetAllocationStrategy(
case kTfLiteDynamic:
return kTfLiteAllocationStrategyMalloc;
case kTfLitePersistentRo:
return kTfLiteAllocationStrategyUnknown;
return kTfLiteAllocationStrategyMalloc;
case kTfLiteCustom:
return kTfLiteAllocationStrategyUnknown;
case kTfLiteVariantObject:
return kTfLiteAllocationStrategyNew;
case kTfLiteNonCpu:
return kTfLiteAllocationStrategyUnknown;
}
return kTfLiteAllocationStrategyUnknown;
}
Expand All @@ -428,6 +529,8 @@ TfLiteRunStability TfLiteTensorGetBufferAddressStability(
return kTfLiteRunStabilityUnknown;
case kTfLiteVariantObject:
return kTfLiteRunStabilityAcrossRuns;
case kTfLiteNonCpu:
return kTfLiteRunStabilityUnknown;
}
return kTfLiteRunStabilityUnknown;
}
Expand All @@ -451,6 +554,8 @@ TfLiteRunStability TfLiteTensorGetDataStability(const TfLiteTensor* const t) {
return kTfLiteRunStabilityUnknown;
case kTfLiteVariantObject:
return kTfLiteRunStabilitySingleRun;
case kTfLiteNonCpu:
return kTfLiteRunStabilityUnknown;
}
return kTfLiteRunStabilityUnknown;
}
Expand All @@ -477,11 +582,13 @@ TfLiteRunStep TfLiteTensorGetDataKnownStep(const TfLiteTensor* t) {
return kTfLiteRunStepUnknown;
case kTfLiteVariantObject:
return kTfLiteRunStepEval;
case kTfLiteNonCpu:
return kTfLiteRunStepUnknown;
}
return kTfLiteRunStepUnknown;
}

// Returns the operation steop when the shape of a tensor is computed.
// Returns the operation step when the shape of a tensor is computed.
//
// Some operations can precompute the shape of their results before the
// evaluation step. This makes the shape available earlier for subsequent
Expand All @@ -504,6 +611,8 @@ TfLiteRunStep TfLiteTensorGetShapeKnownStep(const TfLiteTensor* t) {
return kTfLiteRunStepUnknown;
case kTfLiteVariantObject:
return kTfLiteRunStepEval;
case kTfLiteNonCpu:
return kTfLiteRunStepUnknown;
}
return kTfLiteRunStepUnknown;
}
Expand Down
7 changes: 7 additions & 0 deletions tensorflow/lite/core/c/common.h
Original file line number Diff line number Diff line change
Expand Up @@ -396,6 +396,9 @@ typedef union TfLitePtrUnion {
/// * `kTfLiteVariantObject`: Allocation is an arbitrary type-erased C++
/// object.
/// Allocation and deallocation are done through `new` and `delete`.
/// * `kTfLiteNonCpu`: Tensor buffer is in non-CPU memory, such as AHWB, GPU
/// memory. This tensor is not accessed by the CPU.
/// This is only used by LiteRt API.
typedef enum TfLiteAllocationType {
kTfLiteMemNone = 0,
kTfLiteMmapRo,
Expand All @@ -405,6 +408,7 @@ typedef enum TfLiteAllocationType {
kTfLitePersistentRo,
kTfLiteCustom,
kTfLiteVariantObject,
kTfLiteNonCpu,
} TfLiteAllocationType;

/// Memory allocation strategies.
Expand Down Expand Up @@ -734,6 +738,9 @@ void TfLiteTensorReset(TfLiteType type, const char* name, TfLiteIntArray* dims,
/// quantization, sparsity, ...
TfLiteStatus TfLiteTensorCopy(const TfLiteTensor* src, TfLiteTensor* dst);

/// Returns a tensor holding a deep copy of src.
TfLiteTensor TfLiteTensorClone(TfLiteTensor src);

/// Change the size of the memory block owned by `tensor` to `num_bytes`.
/// Tensors with allocation types other than `kTfLiteDynamic` will be ignored
/// and a `kTfLiteOk` will be returned. `tensor`'s internal data buffer will be
Expand Down

0 comments on commit 388477e

Please sign in to comment.