Automated sync from github.com/tensorflow/tensorflow #3033

Open · wants to merge 1 commit into base: main
2 changes: 1 addition & 1 deletion tensorflow/lite/core/c/c_api_types.h
@@ -61,7 +61,7 @@ extern "C" {
 #ifdef TFL_COMPILE_LIBRARY
 #define TFL_CAPI_EXPORT __declspec(dllexport)
 #else
-#define TFL_CAPI_EXPORT __declspec(dllimport)
+#define TFL_CAPI_EXPORT
 #endif  // TFL_COMPILE_LIBRARY
 #else
 #define TFL_CAPI_EXPORT __attribute__((visibility("default")))
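
In this hunk, a client build on Windows (TFL_COMPILE_LIBRARY not defined) now sees TFL_CAPI_EXPORT expand to nothing rather than __declspec(dllimport). MSVC can link imported functions without the dllimport annotation (the annotation only enables a minor call-site optimization), so existing client code keeps building. A minimal sketch of how the macro is consumed on a public declaration; the declaration shown is illustrative, borrowed from the TF Lite C API headers, and is not part of this diff:

// Illustrative use of the export macro on a public C API declaration.
// When building the library on Windows, TFL_CAPI_EXPORT expands to
// __declspec(dllexport); in client builds it now expands to nothing.
TFL_CAPI_EXPORT extern const char* TfLiteVersion(void);
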
113 changes: 111 additions & 2 deletions tensorflow/lite/core/c/common.cc
@@ -102,6 +102,56 @@ void TfLiteVarArrayFree(T* a) {
   free(a);
 }
 
+TfLiteQuantization TfLiteQuantizationClone(const TfLiteQuantization& src) {
+  TfLiteQuantization dst;
+  dst.type = src.type;
+  switch (src.type) {
+    case kTfLiteNoQuantization:
+      break;
+    case kTfLiteAffineQuantization: {
+      dst.params = calloc(1, sizeof(TfLiteAffineQuantization));
+      const TfLiteAffineQuantization* const src_params =
+          (TfLiteAffineQuantization*)(src.params);
+      TfLiteAffineQuantization* const dst_params =
+          (TfLiteAffineQuantization*)(dst.params);
+      dst_params->quantized_dimension = src_params->quantized_dimension;
+      dst_params->scale = TfLiteFloatArrayCopy(src_params->scale);
+      dst_params->zero_point = TfLiteIntArrayCopy(src_params->zero_point);
+      break;
+    }
+  }
+  return dst;
+}
+
+TfLiteSparsity TfLiteSparsityClone(const TfLiteSparsity& src) {
+  TfLiteSparsity dst = src;
+  dst.traversal_order = TfLiteIntArrayCopy(src.traversal_order);
+  dst.block_map = TfLiteIntArrayCopy(src.block_map);
+  if (src.dim_metadata) {
+    dst.dim_metadata = reinterpret_cast<TfLiteDimensionMetadata*>(
+        calloc(1, sizeof(TfLiteDimensionMetadata) * src.dim_metadata_size));
+    for (int i = 0; i < src.dim_metadata_size; ++i) {
+      dst.dim_metadata[i] = src.dim_metadata[i];
+      dst.dim_metadata[i].array_segments =
+          TfLiteIntArrayCopy(src.dim_metadata[i].array_segments);
+      dst.dim_metadata[i].array_indices =
+          TfLiteIntArrayCopy(src.dim_metadata[i].array_indices);
+    }
+  }
+  return dst;
+}
+
+// Clones the source sparsity to a newly allocated object.
+TfLiteSparsity* TfLiteSparsityClone(const TfLiteSparsity* const src) {
+  if (!src) {
+    return nullptr;
+  }
+  TfLiteSparsity* dst =
+      reinterpret_cast<TfLiteSparsity*>(calloc(1, sizeof(TfLiteSparsity)));
+  *dst = TfLiteSparsityClone(*src);
+  return dst;
+}
+
 }  // namespace
 
 extern "C" {
@@ -234,6 +284,55 @@ void TfLiteTensorFree(TfLiteTensor* t) {
   t->sparsity = nullptr;
 }
 
+TfLiteTensor TfLiteTensorClone(const TfLiteTensor src) {
+  // We copy all of the source data first, then we clone the fields that can't
+  // be shared between two tensor instances.
+  TfLiteTensor dst = src;
+  // Data that is owned by the original tensor must be cloned. Check
+  // TfLiteTensorFree to find out which members are owned.
+  if (src.data.data) {
+    const TfLiteAllocationStrategy allocation_strategy =
+        TfLiteTensorGetAllocationStrategy(&src);
+    switch (allocation_strategy) {
+      case kTfLiteAllocationStrategyUnknown:
+        // We don't know the allocation strategy, which means that the tensor
+        // doesn't own its data: we keep the copied pointer to the data.
+        break;
+      case kTfLiteAllocationStrategyNone:
+        break;
+      case kTfLiteAllocationStrategyMMap:
+        // Mmapped data is read-only and external to the interpreter. We keep
+        // the copied pointer to the data.
+        break;
+      case kTfLiteAllocationStrategyArena:
+        // Arena tensors are allocated when the graph is prepared. There is no
+        // data associated with such a tensor between runs, so we don't care
+        // about the value of `data`.
+        break;
+      case kTfLiteAllocationStrategyMalloc:
+        dst.data.data = malloc(src.bytes);
+        std::memcpy(dst.data.data, src.data.data, src.bytes);
+        break;
+      case kTfLiteAllocationStrategyNew:
+        // Special case for variant objects. They are allocated using new/delete
+        // but require using the `CloneTo` function.
+        if (src.allocation_type == kTfLiteVariantObject) {
+          dst.data.data = reinterpret_cast<const VariantData*>(src.data.data)
+                              ->CloneTo(nullptr);
+        } else {
+          dst.data.data = new char[src.bytes];
+          std::memcpy(dst.data.data, src.data.data, src.bytes);
+        }
+        break;
+    }
+  }
+  dst.dims = TfLiteIntArrayCopy(src.dims);
+  dst.dims_signature = TfLiteIntArrayCopy(src.dims_signature);
+  dst.quantization = TfLiteQuantizationClone(src.quantization);
+  dst.sparsity = TfLiteSparsityClone(src.sparsity);
+  return dst;
+}
+
 void TfLiteTensorReset(TfLiteType type, const char* name, TfLiteIntArray* dims,
                        TfLiteQuantizationParams quantization, char* buffer,
                        size_t size, TfLiteAllocationType allocation_type,
Expand Down Expand Up @@ -399,11 +498,13 @@ TfLiteAllocationStrategy TfLiteTensorGetAllocationStrategy(
case kTfLiteDynamic:
return kTfLiteAllocationStrategyMalloc;
case kTfLitePersistentRo:
return kTfLiteAllocationStrategyUnknown;
return kTfLiteAllocationStrategyMalloc;
case kTfLiteCustom:
return kTfLiteAllocationStrategyUnknown;
case kTfLiteVariantObject:
return kTfLiteAllocationStrategyNew;
case kTfLiteNonCpu:
return kTfLiteAllocationStrategyUnknown;
}
return kTfLiteAllocationStrategyUnknown;
}
@@ -428,6 +529,8 @@ TfLiteRunStability TfLiteTensorGetBufferAddressStability(
       return kTfLiteRunStabilityUnknown;
     case kTfLiteVariantObject:
       return kTfLiteRunStabilityAcrossRuns;
+    case kTfLiteNonCpu:
+      return kTfLiteRunStabilityUnknown;
   }
   return kTfLiteRunStabilityUnknown;
 }
@@ -451,6 +554,8 @@ TfLiteRunStability TfLiteTensorGetDataStability(const TfLiteTensor* const t) {
       return kTfLiteRunStabilityUnknown;
     case kTfLiteVariantObject:
       return kTfLiteRunStabilitySingleRun;
+    case kTfLiteNonCpu:
+      return kTfLiteRunStabilityUnknown;
   }
   return kTfLiteRunStabilityUnknown;
 }
@@ -477,11 +582,13 @@ TfLiteRunStep TfLiteTensorGetDataKnownStep(const TfLiteTensor* t) {
       return kTfLiteRunStepUnknown;
     case kTfLiteVariantObject:
       return kTfLiteRunStepEval;
+    case kTfLiteNonCpu:
+      return kTfLiteRunStepUnknown;
   }
   return kTfLiteRunStepUnknown;
 }
 
-// Returns the operation steop when the shape of a tensor is computed.
+// Returns the operation step when the shape of a tensor is computed.
 //
 // Some operations can precompute the shape of their results before the
 // evaluation step. This makes the shape available earlier for subsequent
@@ -504,6 +611,8 @@ TfLiteRunStep TfLiteTensorGetShapeKnownStep(const TfLiteTensor* t) {
       return kTfLiteRunStepUnknown;
     case kTfLiteVariantObject:
       return kTfLiteRunStepEval;
+    case kTfLiteNonCpu:
+      return kTfLiteRunStepUnknown;
   }
   return kTfLiteRunStepUnknown;
 }
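
Taken together, the new TfLiteTensorClone deep-copies every member the tensor owns (the data buffer, dims, dims_signature, quantization, and sparsity), choosing the copy mechanism from the tensor's allocation strategy. A minimal usage sketch, assuming the caller hand-assembles a kTfLiteDynamic (malloc-backed) tensor; the field setup below is illustrative and not part of this diff:

#include <cstdlib>

#include "tensorflow/lite/core/c/common.h"

int main() {
  // Hand-assembled malloc-backed tensor (illustrative setup only).
  TfLiteTensor src{};
  src.type = kTfLiteFloat32;
  src.allocation_type = kTfLiteDynamic;  // maps to kTfLiteAllocationStrategyMalloc
  src.bytes = 4 * sizeof(float);
  src.data.data = std::malloc(src.bytes);
  static_cast<float*>(src.data.data)[0] = 1.0f;
  src.dims = TfLiteIntArrayCreate(1);
  src.dims->data[0] = 4;
  src.quantization.type = kTfLiteNoQuantization;

  // Deep copy: dst gets its own malloc'd buffer plus cloned dims,
  // quantization, and sparsity, so mutating dst leaves src untouched.
  TfLiteTensor dst = TfLiteTensorClone(src);
  static_cast<float*>(dst.data.data)[0] = 2.0f;

  TfLiteTensorFree(&dst);
  TfLiteTensorFree(&src);
  return 0;
}
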
7 changes: 7 additions & 0 deletions tensorflow/lite/core/c/common.h
@@ -396,6 +396,9 @@ typedef union TfLitePtrUnion {
 /// * `kTfLiteVariantObject`: Allocation is an arbitrary type-erased C++
 /// object.
 /// Allocation and deallocation are done through `new` and `delete`.
+/// * `kTfLiteNonCpu`: Tensor buffer is in non-CPU memory, such as AHWB or
+/// GPU memory. This tensor is not accessed by the CPU.
+/// This is only used by the LiteRT API.
 typedef enum TfLiteAllocationType {
   kTfLiteMemNone = 0,
   kTfLiteMmapRo,
@@ -405,6 +408,7 @@ typedef enum TfLiteAllocationType {
   kTfLitePersistentRo,
   kTfLiteCustom,
   kTfLiteVariantObject,
+  kTfLiteNonCpu,
 } TfLiteAllocationType;
 
 /// Memory allocation strategies.
@@ -734,6 +738,9 @@ void TfLiteTensorReset(TfLiteType type, const char* name, TfLiteIntArray* dims,
 /// quantization, sparsity, ...
 TfLiteStatus TfLiteTensorCopy(const TfLiteTensor* src, TfLiteTensor* dst);
 
+/// Returns a tensor holding a deep copy of src.
+TfLiteTensor TfLiteTensorClone(TfLiteTensor src);
+
 /// Change the size of the memory block owned by `tensor` to `num_bytes`.
 /// Tensors with allocation types other than `kTfLiteDynamic` will be ignored
 /// and a `kTfLiteOk` will be returned. `tensor`'s internal data buffer will be
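
Because kTfLiteNonCpu marks buffers that live outside CPU-addressable memory (AHWB or GPU), CPU-side code should check the allocation type before dereferencing data.data. A hedged sketch of such a guard; the helper name is ours, not part of the API:

#include "tensorflow/lite/core/c/common.h"

// Hypothetical helper: true when it is safe to read t->data.data from the
// CPU. Per the documentation added above, kTfLiteNonCpu tensors are backed
// by AHWB/GPU memory and are never accessed by the CPU.
inline bool IsCpuAccessible(const TfLiteTensor* t) {
  return t != nullptr && t->data.data != nullptr &&
         t->allocation_type != kTfLiteNonCpu;
}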