forked from pytorch/pytorch
-
Notifications
You must be signed in to change notification settings - Fork 0
/
export.h
89 lines (75 loc) · 3.41 KB
/
export.h
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
#pragma once
#include <torch/csrc/jit/ir.h>
#include <torch/csrc/jit/pickler.h>
#include <torch/csrc/jit/script/module.h>
#include <torch/csrc/onnx/onnx.h>
#include <caffe2/serialize/inline_container.h>
#include <ostream>
namespace torch {
namespace jit {
// This map is used to keep track of parameters that should be exported
// externally. When `defer_weight_export` is true, the returned map contains
// kv pairs that map {external reference name} -> {at::Tensor to be exported}.
// It is the responsibility of the caller to export these appropriately.
//
// For example, when exporting to a zip archive, the caller may write out files
// for each entry in the export map, with the filename being the key and the
// file contents being the raw tensor data.
using RawDataExportMap = std::unordered_map<std::string, at::Tensor>;
// Serializes `graph` (with the given parameter `initializers`) to the ONNX
// format, targeting `onnx_opset_version`.
//
// Returns a tuple of:
//   - a std::string holding the serialized model (presumably the ONNX
//     ModelProto bytes — confirm against the implementation), and
//   - a RawDataExportMap of externally-exported weights; this map is
//     non-trivial only when `defer_weight_export` is true (see the
//     RawDataExportMap comment above).
//
// Parameters:
//   graph                      - the JIT graph to export.
//   initializers               - parameter name -> tensor value pairs baked
//                                into the model as ONNX initializers.
//   onnx_opset_version         - ONNX operator-set version to target.
//   dynamic_axes               - per-value name, maps {axis index -> symbolic
//                                dimension name} for axes whose size is not
//                                fixed at export time.
//   defer_weight_export        - if true, weights are returned in the export
//                                map instead of being embedded (see above).
//   operator_export_type       - controls how non-ONNX ops are handled
//                                (e.g. strict ONNX vs. ATen fallback).
//   strip_doc_string           - if true, omit doc strings from the output.
//   keep_initializers_as_inputs- if true, initializers also appear as graph
//                                inputs (affects downstream optimizers).
//   custom_opsets              - custom opset domain -> version to record.
//   add_node_names             - if true, assign names to exported nodes.
TORCH_API std::tuple<std::string, RawDataExportMap> export_onnx(
const std::shared_ptr<Graph>& graph,
const std::map<std::string, at::Tensor>& initializers,
int64_t onnx_opset_version,
const std::unordered_map<std::string, std::unordered_map<int64_t, std::string>>& dynamic_axes,
bool defer_weight_export = false,
::torch::onnx::OperatorExportTypes operator_export_type =
::torch::onnx::OperatorExportTypes::ONNX,
bool strip_doc_string = true,
bool keep_initializers_as_inputs = true,
const std::map<std::string, int>& custom_opsets = {},
bool add_node_names = true);
TORCH_API void check_onnx_proto(const std::string& proto_string);
// For testing purposes: renders the ONNX export of `graph` as a
// human-readable string instead of serialized proto bytes.
//
// Parameters mirror export_onnx() (see above); the extra `google_printer`
// flag selects an alternative text format (presumably protobuf's own text
// printer — confirm against the implementation).
TORCH_API std::string pretty_print_onnx(
const std::shared_ptr<Graph>& graph,
const std::map<std::string, at::Tensor>& initializers,
int64_t onnx_opset_version,
bool defer_weight_export,
::torch::onnx::OperatorExportTypes operator_export_type =
::torch::onnx::OperatorExportTypes::ONNX,
bool google_printer = false,
bool keep_initializers_as_inputs = true,
const std::map<std::string, int>& custom_opsets = {},
bool add_node_names = true);
// Serializes `module` to the output stream `out`.
//   metadata        - extra files (name -> contents) to embed alongside the
//                     module in the serialized archive.
//   bytecode_format - if true, emit the (mobile) bytecode format instead of
//                     the default — confirm exact semantics in the
//                     implementation.
TORCH_API void ExportModule(
const script::Module& module,
std::ostream& out,
const script::ExtraFilesMap& metadata = script::ExtraFilesMap(),
bool bytecode_format = false);
// Serializes `module` to the file at `filename`. Overload of the
// stream-based ExportModule; `metadata` and `bytecode_format` behave the
// same way for all overloads.
TORCH_API void ExportModule(
const script::Module& module,
const std::string& filename,
const script::ExtraFilesMap& metadata = script::ExtraFilesMap(),
bool bytecode_format = false);
// Serializes `module` through a caller-supplied writer callback.
// `writer_func` receives (data pointer, byte count) and returns the number
// of bytes it consumed (presumably the bytes actually written — confirm
// against the implementation). Useful for sinks that are neither a file
// nor a std::ostream.
TORCH_API void ExportModule(
const script::Module& module,
const std::function<size_t(const void*, size_t)>& writer_func,
const script::ExtraFilesMap& metadata = script::ExtraFilesMap(),
bool bytecode_format = false);
// Write the bytes of a pickle archive and the tensors referenced inside that
// archive.
//   archive_name - logical name of the archive inside the output container.
//   pickle_bytes - pointer to `size` bytes of already-pickled data.
//   tensors      - tensor payloads referenced by the pickle stream, written
//                  alongside it.
//   out          - destination zip-container writer.
TORCH_API void writeArchiveAndTensors(
const std::string& archive_name,
const char* pickle_bytes,
size_t size,
const std::vector<WriteableTensorData>& tensors,
caffe2::serialize::PyTorchStreamWriter& out);
// Surrounding system can install an additional hook to produce extra files
// with metadata based on environment every time a module is serialized.
// The hook is invoked with the module being exported and returns extra
// (name -> contents) files to include in the archive. Note: a single
// process-wide hook; thread-safety of installation is not specified here.
using ExportModuleExtraFilesHook =
std::function<script::ExtraFilesMap(const script::Module&)>;
// Installs (or, presumably with an empty function, clears) the process-wide
// extra-files hook used by ExportModule.
TORCH_API void SetExportModuleExtraFilesHook(ExportModuleExtraFilesHook hook);
// Returns a list of names of all operators in the module and its submodules.
// Useful for determining the operator set a serialized model depends on
// (e.g. for selective builds) — exact name format is defined by the
// implementation.
TORCH_API std::vector<std::string> export_opnames(const script::Module& m);
} // namespace jit
} // namespace torch