int num_dims, size_t len) {
void* data = allocate_tensor("TF_AllocateTensor", len);
return TF_NewTensor(dtype, dims, num_dims, data, len, deallocate_buffer,
nullptr);
}
// Wraps a caller-provided buffer `data` of `len` bytes in a new TF_Tensor of
// the given dtype/shape. Ownership of `data` passes to the returned tensor:
// `deallocator(data, len, deallocator_arg)` is invoked when the buffer is no
// longer needed. If `data` is not sufficiently aligned for Eigen and the
// dtype is memcpy-able, the contents are copied into an aligned allocation
// and the caller's buffer is freed immediately.
//
// Returns nullptr (and takes no ownership of `data`) when `len` is too small
// to hold `num_elements * element_size` bytes for the given dtype/shape.
TF_Tensor* TF_NewTensor(TF_DataType dtype, const int64_t* dims, int num_dims,
                        void* data, size_t len,
                        void (*deallocator)(void* data, size_t len, void* arg),
                        void* deallocator_arg) {
  std::vector<tensorflow::int64> dimvec(num_dims);
  for (int i = 0; i < num_dims; ++i) {
    dimvec[i] = static_cast<tensorflow::int64>(dims[i]);
  }

  // Validate `len` against the requested shape BEFORE allocating or taking
  // ownership of anything. The previous code performed this check last and,
  // on failure, leaked the TF_ManagedBuffer (plain `delete ret` does not
  // release the buffer) and had already freed the caller's data in the
  // misaligned-copy branch. Checking first means the failure path allocates
  // nothing and leaves ownership of `data` with the caller.
  TensorShape shape(dimvec);
  const size_t elem_size = TF_DataTypeSize(dtype);
  if (elem_size > 0 &&
      len < elem_size * static_cast<size_t>(shape.num_elements())) {
    return nullptr;
  }

  // EIGEN_MAX_ALIGN_BYTES expands to 0 when Eigen is built without alignment
  // (e.g. vectorization disabled); clamp to 1 to avoid a modulo-by-zero.
  constexpr intptr_t kAlign =
      EIGEN_MAX_ALIGN_BYTES > 0 ? EIGEN_MAX_ALIGN_BYTES : 1;

  TF_ManagedBuffer* buf = new TF_ManagedBuffer;
  buf->len_ = len;
  if (dtype != TF_STRING && dtype != TF_RESOURCE &&
      tensorflow::DataTypeCanUseMemcpy(static_cast<DataType>(dtype)) &&
      reinterpret_cast<intptr_t>(data) % kAlign != 0) {
    // TF_STRING and TF_RESOURCE tensors have a different representation in
    // TF_Tensor than they do in tensorflow::Tensor. So a copy here is a waste
    // (any alignment requirements will be taken care of by TF_TensorToTensor
    // and TF_TensorFromTensor).
    //
    // Other types have the same representation, so copy only if it is safe to
    // do so.
    buf->data_ = allocate_tensor("TF_NewTensor", len);
    std::memcpy(buf->data_, data, len);
    buf->deallocator_ = deallocate_buffer;
    buf->deallocator_arg_ = nullptr;
    // Free the original (misaligned) buffer; from here on we own only the
    // aligned copy.
    deallocator(data, len, deallocator_arg);
  } else {
    // Aligned (or copy-exempt) case: adopt the caller's buffer directly.
    buf->data_ = data;
    buf->deallocator_ = deallocator;
    buf->deallocator_arg_ = deallocator_arg;
  }
  return new TF_Tensor{dtype, std::move(shape), buf};
}
// NOTE(review): the two lines below this function in the original paste were
// stray Blogger page footer text ("没有评论:" / "发表评论" — i.e. "No
// comments:" / "Post a Comment"), web-scrape residue that is not valid C++
// and not part of the TensorFlow source. Neutralized as a comment.