// Scalar fed at runtime through the "feed" placeholder, and the constant
// 2.0f baked into the graph as a Const op.
float in_val_one = 4.0f;
float const_two = 2.0f;
// Wrap the raw floats as scalar TF_Tensors: dims == NULL and num_dims == 0
// mean 0-rank (scalar). tensor_free_none is a no-op deallocator because the
// backing floats live on the stack, not the heap.
TF_Tensor * tensor_in = TF_NewTensor(TF_FLOAT, NULL, 0, &in_val_one, sizeof(float), tensor_free_none, NULL);
TF_Tensor * tensor_out = NULL; // easy access after this is allocated by TF_SessionRun
TF_Tensor * tensor_const_two = TF_NewTensor(TF_FLOAT, NULL, 0, &const_two, sizeof(float), tensor_free_none, NULL);
// Operations: feed -> add <- const
TF_Operation * feed = PlaceHolder(graph, status, TF_FLOAT, "feed");
TF_Operation * two = Const(graph, status, tensor_const_two, "const");
TF_Operation * add = Add(graph, status, feed, two, "add");
// Session inputs. A TF_Output is {operation, output_index}; write the inner
// braces explicitly instead of relying on brace elision (-Wmissing-braces).
TF_Output input_operations[] = { { feed, 0 } };
TF_Tensor * input_tensors[] = { tensor_in };
// Session outputs. Point at tensor_out so TF_SessionRun's result lands
// directly in it (preserving the "easy access" intent above).
TF_Output output_operations[] = { { add, 0 } };
TF_Tensor ** output_tensors = &tensor_out;
TF_SessionRun(session, NULL,
              // Inputs: i-th TF_Output is fed the i-th TF_Tensor (by position)
              input_operations, input_tensors, 1,
              // Outputs: results are stored into output_tensors[i]
              output_operations, output_tensors, 1,
              // Target operations
              NULL, 0, NULL,
              status);
How does TF_SessionRun link input_operations with input_tensors? Positionally at the API boundary, but internally by name.
Each TF_Output contains an operation and an index, indicating the nth output of that operation. Each operation contains a node, and every node has its own name.
// Convert from TF_Output and TF_Tensor to a string and Tensor.
// input_pairs is sized up-front so TF_Run_Inputs can fill in the Tensor half
// of each pair; the name half is filled from the TF_Output entries below.
std::vector<std::pair<string, Tensor>> input_pairs(ninputs);
// On conversion failure, `status` is already set; bail out early.
if (!TF_Run_Inputs(input_values, &input_pairs, status)) return;
for (int i = 0; i < ninputs; ++i) {
// OutputName presumably renders inputs[i] as the node's output name
// (e.g. "name:index") -- confirm against OutputName's definition.
input_pairs[i].first = OutputName(inputs[i]);
}
By using std::pair<>, a string (the name of a node) is linked with a Tensor. Note that the Tensor is newly created by the conversion, not a view of the C tensor's buffer.
// Fills in the Tensor half of each (name, Tensor) pair in `input_pairs` by
// converting the corresponding raw C tensor from `c_inputs`. Stops at the
// first failed conversion, recording the error in `status` and returning
// false; returns true when every input converts cleanly.
static bool TF_Run_Inputs(TF_Tensor* const* c_inputs,
                          std::vector<std::pair<string, Tensor>>* input_pairs,
                          TF_Status* status) {
  int idx = 0;
  for (auto& name_and_tensor : *input_pairs) {
    status->status = TF_TensorToTensor(c_inputs[idx], &name_and_tensor.second);
    if (!status->status.ok()) return false;
    ++idx;
  }
  return true;
}
Later, this vector of (name, Tensor) pairs is given the alias NamedTensorList:
typedef std::vector<std::pair<string, Tensor>> NamedTensorList;
Then we can see it used in class DirectSession : public Session:
// Runs the graph: feeds the named tensors in `inputs`, fetches the tensors
// named by `output_names` into `outputs`, and ensures `target_nodes` execute.
// Overrides Session::Run.
::tensorflow::Status Run(const NamedTensorList& inputs,
const std::vector<string>& output_names,
const std::vector<string>& target_nodes,
std::vector<Tensor>* outputs) override;
Those tensors will be packed again into feed_args, then passed to call_frame.SetArgs(feed_args):
// NOTE(review): excerpt only -- the function body continues beyond this
// snippet (it is cut off inside the SetArgs error handling).
// Runs one step: resolves (or builds) the executors for this feed/fetch
// signature, stages the feed tensors into a FunctionCallFrame, then hands
// the frame to the executors.
Status DirectSession::Run(const RunOptions& run_options,
const NamedTensorList& inputs,
const std::vector<string>& output_names,
const std::vector<string>& target_nodes,
std::vector<Tensor>* outputs,
RunMetadata* run_metadata) {
TF_RETURN_IF_ERROR(CheckNotClosed());
TF_RETURN_IF_ERROR(CheckGraphCreated("Run()"));
// Metrics: count every Run() call on this session.
direct_session_runs->GetCell()->IncrementBy(1);
// Extract the inputs names for this run of the session.
// Only the names are needed to look up / create the executors.
std::vector<string> input_tensor_names;
input_tensor_names.reserve(inputs.size());
for (const auto& it : inputs) {
input_tensor_names.push_back(it.first);
}
// Check if we already have an executor for these arguments.
ExecutorsAndKeys* executors_and_keys;
RunStateArgs run_state_args(run_options.debug_options());
TF_RETURN_IF_ERROR(GetOrCreateExecutors(input_tensor_names, output_names,
target_nodes, &executors_and_keys,
&run_state_args));
// Configure a call frame for the step, which we use to feed and
// fetch values to and from the executors.
FunctionCallFrame call_frame(executors_and_keys->input_types,
executors_and_keys->output_types);
// Place each feed tensor at the argument slot the executor expects
// (input_name_to_index maps feed name -> slot).
gtl::InlinedVector<Tensor, 4> feed_args(inputs.size());
for (const auto& it : inputs) {
if (it.second.dtype() == DT_RESOURCE) {
// Resource feeds carry a handle rather than the value itself;
// dereference the handle into a concrete Tensor first.
Tensor tensor_from_handle;
TF_RETURN_IF_ERROR(
ResourceHandleToInputTensor(it.second, &tensor_from_handle));
feed_args[executors_and_keys->input_name_to_index[it.first]] =
tensor_from_handle;
} else {
feed_args[executors_and_keys->input_name_to_index[it.first]] = it.second;
}
}
// SetArgs internal errors indicate a caller-side mismatch (e.g. wrong
// dtype), so they are surfaced as InvalidArgument.
const Status s = call_frame.SetArgs(feed_args);
if (errors::IsInternal(s)) {
return errors::InvalidArgument(s.error_message());
} else if (!s.ok()) {
A graph may contain several subgraphs — for example, one that initializes variables; you should execute that first so that random matrices or constants are ready.
Each set of feeds/fetches (and its executors) is represented by struct ExecutorsAndKeys:
// An ExecutorsAndKeys is created for a given set of feeds/fetches.
// 'step_count' is the number of times this graph is executed.
// 'graph' is the entire graph being executed. 'name_to_node'
// maps node name to node. We keep 'graph' and 'name_to_node' only in
// the case of partial runs. Each item in 'items' is the executor for
// a partition of the graph bundled with its dependent library runtime.
// 'input_keys' are the rendezvous keys for the feeds and 'output_keys'
// are rendezvous keys for the fetches.
struct ExecutorsAndKeys {
  // Number of times this graph has been executed. In-class initializer
  // replaces a constructor whose only job was zeroing this field (Rule of
  // Zero); default construction behaves exactly as before.
  std::atomic_int_fast64_t step_count{0};
  // The entire graph being executed; kept (together with name_to_node)
  // only in the case of partial runs.
  std::unique_ptr<Graph> graph;
  // Maps node name to node; kept only for partial runs.
  NameNodeMap name_to_node;
  // One executor per graph partition, bundled with its dependent library
  // runtime.
  std::vector<PerPartitionExecutorsAndLib> items;
  // Feed name -> argument slot in the call frame.
  std::unordered_map<string, size_t> input_name_to_index;
  // Feed name -> rendezvous key used to deliver the tensor.
  std::unordered_map<string, string> input_name_to_rendezvous_key;
  // Fetch name -> result slot.
  std::unordered_map<string, size_t> output_name_to_index;
  // Fetch name -> rendezvous key used to retrieve the tensor.
  std::unordered_map<string, string> output_name_to_rendezvous_key;
  DataTypeVector input_types;
  DataTypeVector output_types;
  CallableOptions callable_options;
};