
Commit

repo-sync-2023-09-15T10:19:34+0800
anakinxc committed Sep 15, 2023
1 parent 0af5b1f commit 7d529db
Showing 74 changed files with 767 additions and 660 deletions.
2 changes: 0 additions & 2 deletions .bazelrc
@@ -51,8 +51,6 @@ build:linux-release --action_env=BAZEL_LINKLIBS=-l%:libstdc++.a:-l%:libgcc.a
build:macos --copt="-Xpreprocessor -fopenmp"
build:macos --copt=-Wno-unused-command-line-argument
build:macos --features=-supports_dynamic_linker
-build:macos --cxxopt -Wno-deprecated-enum-enum-conversion
-build:macos --cxxopt -Wno-deprecated-anon-enum-enum-conversion
build:macos --macos_minimum_os=11.0
build:macos --host_macos_minimum_os=11.0

6 changes: 3 additions & 3 deletions bazel/repositories.bzl
@@ -18,7 +18,7 @@ load("@bazel_tools//tools/build_defs/repo:git.bzl", "git_repository")

SECRETFLOW_GIT = "https://github.com/secretflow"

YACL_COMMIT_ID = "aa107d380e9287a8a5d874feb19dd03be34e4241"
YACL_COMMIT_ID = "5418371c4335f4a64fbd0bdabb0efd94da2af808"

def spu_deps():
_rules_cuda()
@@ -158,8 +158,8 @@ def _com_github_xtensor_xtl():
)

def _com_github_openxla_xla():
OPENXLA_COMMIT = "0c99beffabc5d43fa29f121674eb59e14a22c779"
OPENXLA_SHA256 = "d4c7511a496aeb917976c0d8a65de374c395546f0c3d4077d9dfd4df780d7ea8"
OPENXLA_COMMIT = "75a7973c2850fcc33278c84e1b62eff8f0ad35f8"
OPENXLA_SHA256 = "4534c3230853e990ac613898c2ff39626d1beacb0c3675fbea502dce3e32f620"

SKYLIB_VERSION = "1.3.0"

7 changes: 2 additions & 5 deletions bazel/spu.bzl
@@ -24,11 +24,8 @@ WARNING_FLAGS = [
"-Wextra",
"-Werror",
"-Wno-unused-parameter",
"-Wnon-virtual-dtor",
] + select({
"@bazel_tools//src/conditions:darwin": ["-Wunused-const-variable"],
"//conditions:default": ["-Wunused-const-variable=1"],
})
]

DEBUG_FLAGS = ["-O0", "-g"]
RELEASE_FLAGS = ["-O2"]
FAST_FLAGS = ["-O1"]
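The consolidated flag list enables `-Wnon-virtual-dtor` on every platform and drops the per-platform `select` over `-Wunused-const-variable`. For context, a minimal sketch of the bug class `-Wnon-virtual-dtor` catches (class names here are hypothetical, not from this repository):

```cpp
struct Kernel {
  virtual void run() = 0;
  ~Kernel() {}  // -Wnon-virtual-dtor fires: virtual functions, non-virtual destructor
};

struct AddKernel : Kernel {
  void run() override {}
  // ~AddKernel() is silently skipped when deleting through a Kernel*
};

int main() {
  Kernel* k = new AddKernel();
  delete k;  // undefined behavior: only ~Kernel() runs
}
```

Since `WARNING_FLAGS` already includes `-Werror`, a class definition like `Kernel` now fails the build everywhere, not just on one platform.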
4 changes: 2 additions & 2 deletions examples/python/ml/flax_llama7b/README.md
@@ -17,7 +17,8 @@ This example demonstrates how to use SPU to run secure inference on a pre-traine
export PYTHONPATH="${PWD}:$PYTHONPATH"
```

-Download trained LLaMA-B[PyTroch-Version] from "https://huggingface.co/openlm-research/open_llama_7b", and convert it to Flax.msgpack as:
+Download trained LLaMA-B[PyTroch-Version] from [Hugging Face](https://huggingface.co/openlm-research/open_llama_7b)
+, and convert it to Flax.msgpack as:

```sh
python3 -m EasyLM.scripts.convert_checkpoint --load_checkpoint='params::path-to-LLaMA-7B[Pytroch-Version]' --output_file='path-to-LLaMMA-7B.msgpack' --streaming=False
@@ -54,4 +55,3 @@ This example demonstrates how to use SPU to run secure inference on a pre-traine
```

5. To reproduce the benchmarks results in the [Puma paper](https://arxiv.org/abs/2307.12533), please check [here](https://github.com/AntCPLab/puma_benchmarks).
-
3 changes: 3 additions & 0 deletions examples/python/ml/stax_nn/stax_nn.py
@@ -187,6 +187,9 @@ def run_model(model_name, run_cpu=True):
'network_b': models.minionn,
'network_c': models.lenet,
'network_d': models.chameleon,
+'alexnet': models.alexnet,
+'lenet': models.lenet,
+'vgg16': models.vgg16,
}

fn = partial(train_mnist, MODEL_MAPS.get(model_name))
6 changes: 3 additions & 3 deletions libspu/compiler/common/ir_printer_config.cc
@@ -43,7 +43,7 @@ IRPrinterConfig::IRPrinterConfig(std::string_view pp_dir)
}
}

-void IRPrinterConfig::printBeforeIfEnabled(Pass *pass, Operation *operation,
+void IRPrinterConfig::printBeforeIfEnabled(Pass *pass, Operation *,
PrintCallbackFn print_callback) {
std::filesystem::path file_name =
pp_dir_ / genFileName(pass->getName(), "before");
@@ -56,7 +56,7 @@ void IRPrinterConfig::printBeforeIfEnabled(Pass *pass, Operation *operation,
print_callback(f);
}

-void IRPrinterConfig::printAfterIfEnabled(Pass *pass, Operation *operation,
+void IRPrinterConfig::printAfterIfEnabled(Pass *pass, Operation *,
PrintCallbackFn print_callback) {
std::filesystem::path file_name =
pp_dir_ / genFileName(pass->getName(), "after");
@@ -73,4 +73,4 @@ std::string IRPrinterConfig::genFileName(StringRef pass_name, StringRef stage) {
return fmt::format("{}-{}-{}.mlir", pp_cnt++, pass_name.str(), stage.str());
}

-} // namespace mlir::pphlo
\ No newline at end of file
+} // namespace mlir::pphlo
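Most of the C++ churn in this commit is one mechanical change, visible above: a parameter that an implementation never reads loses its name (`Operation *operation` becomes `Operation *`), which silences `-Wunused-parameter` without changing the signature. A minimal sketch of the idiom, with hypothetical names:

```cpp
#include <iostream>
#include <string>

struct Logger {
  // The interface requires both parameters.
  virtual void log(int id, const std::string& msg) = 0;
  virtual ~Logger() = default;
};

struct ConsoleLogger : Logger {
  // This override never reads `id`. Naming it would trip -Wunused-parameter
  // (fatal under -Werror); leaving it unnamed, or commenting the name out as
  // `int /*id*/`, keeps the signature intact and silences the warning.
  void log(int /*id*/, const std::string& msg) override {
    std::cout << msg << '\n';
  }
};
```

The `/*counter*/`-style spellings in later files are the same idiom with the name kept as documentation.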
6 changes: 3 additions & 3 deletions libspu/compiler/passes/expand_secret_gather.cc
@@ -340,9 +340,9 @@ TypedValue<RankedTensorType> AdjustBatchDimsInAccumulator(
return ExpandFirstDimIntoNDims(accumulator, batch_dim_bounds);
}

void BuildWhileCondition(Region &cond, Value counter,
Value canonical_start_indices, Value accumulator_init,
Value loop_upper_bound) {
void BuildWhileCondition(Region &cond, Value /*counter*/,
Value /*canonical_start_indices*/,
Value /*accumulator_init*/, Value loop_upper_bound) {
OpBuilder builder(cond);
TypeTools type_tool;

2 changes: 1 addition & 1 deletion libspu/compiler/passes/hlo_legalize_to_pphlo.cc
@@ -873,7 +873,7 @@ class HloToPPHloOpConverter<stablehlo::ReturnOp>
: public OpConversionPattern<stablehlo::ReturnOp> {
public:
HloToPPHloOpConverter(TypeConverter &type_converter, MLIRContext *context,
const ValueVisibilityMap &vis)
const ValueVisibilityMap &)
: OpConversionPattern<stablehlo::ReturnOp>(type_converter, context) {}

LogicalResult
3 changes: 1 addition & 2 deletions libspu/compiler/passes/optimize_select.cc
@@ -38,8 +38,7 @@ struct SelectConversion : public OpRewritePattern<SelectOp> {
explicit SelectConversion(MLIRContext *context)
: OpRewritePattern<SelectOp>(context) {}

-LogicalResult matchAndRewrite(SelectOp op,
-PatternRewriter &rewriter) const override {
+LogicalResult matchAndRewrite(SelectOp op, PatternRewriter &) const override {
auto pred = op.getPred();
// Only do this for certain select...
if (pred.getDefiningOp<PreferAOp>() != nullptr) {
3 changes: 1 addition & 2 deletions libspu/compiler/tools/mlir-pphlo-opt.cc
@@ -12,13 +12,12 @@
// See the License for the specific language governing permissions and
// limitations under the License.

#include "xla/mlir_hlo/mhlo/IR/hlo_ops.h"
// #include "mlir-hlo/Dialect/mhlo/transforms/register_passes.h"
#include "mlir/Dialect/Func/Extensions/InlinerExtension.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Tools/mlir-opt/MlirOptMain.h"
#include "mlir/Transforms/Passes.h"
#include "stablehlo/dialect/StablehloOps.h"
#include "xla/mlir_hlo/mhlo/IR/hlo_ops.h"
#include "xla/mlir_hlo/mhlo/transforms/passes.h"

#include "libspu/compiler/passes/register_passes.h"
2 changes: 1 addition & 1 deletion libspu/core/cexpr.cc
@@ -32,7 +32,7 @@ class ConstantExpr : public BaseExpr {
public:
explicit ConstantExpr(Value val) : val_(val) {}
std::string expr() const override { return fmt::format("{}", val_); }
Value eval(const Params& params) const override { return val_; }
Value eval(const Params&) const override { return val_; }
};

class VariableExpr : public BaseExpr {
4 changes: 2 additions & 2 deletions libspu/core/trace.cc
@@ -163,7 +163,7 @@ std::shared_ptr<spdlog::logger> getTraceLogger() {

} // namespace

-void Tracer::logActionBegin(int64_t id, const std::string& mod,
+void Tracer::logActionBegin(int64_t, const std::string& mod,
const std::string& name,
const std::string& detail) const {
const auto indent = getIndentString(depth_);
@@ -176,7 +176,7 @@ void Tracer::logActionBegin(int64_t id, const std::string& mod,
}
}

-void Tracer::logActionEnd(int64_t id, const std::string& mod,
+void Tracer::logActionEnd(int64_t, const std::string& mod,
const std::string& name,
const std::string& detail) const {
const auto indent = getIndentString(depth_);
2 changes: 1 addition & 1 deletion libspu/core/trace.h
@@ -47,7 +47,7 @@ inline std::ostream& operator<<(std::ostream& os,
namespace spu {
namespace internal {

inline void variadicToStringImpl(std::stringstream& ss) {}
inline void variadicToStringImpl(std::stringstream&) {}

template <typename T>
void variadicToStringImpl(std::stringstream& ss, const T& t) {
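`variadicToStringImpl` is the classic recursive variadic-template pattern: an empty base case plus an overload that consumes one argument per call. The sketch below shows how the pieces plausibly fit together; the variadic overload and the `variadicToString` wrapper are illustrative assumptions, not code quoted from trace.h:

```cpp
#include <sstream>
#include <string>

// Base case: no arguments left to append (the overload touched above).
inline void variadicToStringImpl(std::stringstream&) {}

// Recursive case: stream the first argument, then recurse on the rest.
template <typename T, typename... Rest>
void variadicToStringImpl(std::stringstream& ss, const T& t,
                          const Rest&... rest) {
  ss << t;
  variadicToStringImpl(ss, rest...);
}

// Assumed convenience wrapper.
template <typename... Args>
std::string variadicToString(const Args&... args) {
  std::stringstream ss;
  variadicToStringImpl(ss, args...);
  return ss.str();
}

// variadicToString("x=", 1, ", y=", 2.5) == "x=1, y=2.5"
```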
2 changes: 1 addition & 1 deletion libspu/core/type.h
@@ -274,7 +274,7 @@ class VoidTy : public TypeImpl<VoidTy, TypeObject> {

std::string toString() const override { return ""; }

-bool equals(TypeObject const* other) const override { return true; }
+bool equals(TypeObject const*) const override { return true; }
};

// Builtin type, plaintext types.
11 changes: 5 additions & 6 deletions libspu/device/executor.cc
@@ -131,7 +131,7 @@ std::vector<spu::Value> runRegion(OpExecutor *executor, //

std::vector<spu::Value> runBlock(OpExecutor *executor, SPUContext *sctx,
SymbolScope *symbols, mlir::Block &block,
absl::Span<spu::Value const> params,
absl::Span<spu::Value const> /*params*/,
const ExecutionOptions &opts) {
for (auto &op : block.without_terminator()) {
executor->runKernel(sctx, symbols, op, opts);
@@ -273,11 +273,10 @@ class BlockParallelRunner final {
}
};

std::vector<spu::Value> runBlockParallel(OpExecutor *executor, SPUContext *sctx,
SymbolScope *symbols,
mlir::Block &block,
absl::Span<spu::Value const> params,
const ExecutionOptions &opts) {
std::vector<spu::Value> runBlockParallel(
OpExecutor *executor, SPUContext *sctx, SymbolScope *symbols,
mlir::Block &block, absl::Span<spu::Value const> /*params*/,
const ExecutionOptions &opts) {
BlockParallelRunner runner(sctx, executor, symbols, opts);
return runner.run(block);
}