Commit 03a52f1
Do not use deprecated usePropertiesForAttributes=0 for tf_device ops.
PiperOrigin-RevId: 578101119
akuegel authored and tensorflower-gardener committed Oct 31, 2023
1 parent ebdfab6 commit 03a52f1
Showing 25 changed files with 252 additions and 278 deletions.
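
Context for the change: MLIR dialects now store inherent attributes in a per-op properties struct by default, and usePropertiesForAttributes = 0 was the deprecated opt-out. Removing the opt-out from the tf_device dialect moves inherent attributes such as device out of the trailing attribute dictionary and into properties, which the generic printer emits inline as <{...}>. A minimal sketch of the resulting syntax shift, mirroring the test updates below (simplified op bodies):

// Before this change: the inherent attribute prints in the trailing dictionary.
"tf_device.launch"() ({
  tf_device.return
}) {device = "CPU:0"} : () -> ()

// After this change: the same attribute is a property, printed inline.
"tf_device.launch"() <{device = "CPU:0"}> ({
  tf_device.return
}) : () -> ()
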
4 changes: 2 additions & 2 deletions tensorflow/compiler/mlir/tensorflow/ir/tf_device.cc
@@ -387,7 +387,7 @@ void ReplicateOp::print(OpAsmPrinter& p) {
// packed_input
// %b as %block_arg1: type
const int32_t n = this->getN();
- const int32_t num_replicated_inputs = getOperandSegmentSizes()[0];
+ const int32_t num_replicated_inputs = getProperties().operandSegmentSizes[0];
const int32_t num_replicated_block_args = num_replicated_inputs / n;

if (getNumOperands()) {
@@ -502,7 +502,7 @@ LogicalResult ReplicateOp::verify() {

Block& block = op.getBody().front();

- auto operandSegmentSizes = op.getOperandSegmentSizes();
+ auto operandSegmentSizes = op.getProperties().operandSegmentSizes;
const int32_t num_replicated_inputs = operandSegmentSizes[0];
const int32_t num_packed_inputs = operandSegmentSizes[1];

2 changes: 0 additions & 2 deletions tensorflow/compiler/mlir/tensorflow/ir/tf_device_ops.td
@@ -39,7 +39,6 @@ def TfDevice_Dialect : Dialect {
}];

let cppNamespace = "::mlir::tf_device";
- let usePropertiesForAttributes = 0;
}

//===----------------------------------------------------------------------===//
@@ -263,7 +262,6 @@ For example:
Variadic<AnyType>:$replicated_inputs,
Variadic<AnyType>:$packed_inputs,

- DenseI32ArrayAttr:$operandSegmentSizes,
ConfinedAttr<I32Attr, [IntMinValue<2>]>:$n,
OptionalAttr<DictionaryAttr>:$devices
);
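
With properties enabled by default, ODS generates storage for operandSegmentSizes automatically, so the explicit DenseI32ArrayAttr argument on tf_device.replicate becomes redundant; correspondingly, the C++ change above reads the sizes through getProperties() instead of the removed attribute accessor. A hedged sketch of the generic form this implies, with hypothetical operand types:

// Sketch only: two replicated inputs, no packed inputs, n = 2.
// The segment sizes print as an inline property in the generic form.
func.func @replicate_sketch(%in0: tensor<i32>, %in1: tensor<i32>) {
  "tf_device.replicate"(%in0, %in1) <{n = 2 : i32, operandSegmentSizes = array<i32: 2, 0>}> ({
  ^bb0(%block_arg0: tensor<i32>):
    tf_device.return
  }) : (tensor<i32>, tensor<i32>) -> ()
  func.return
}
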
@@ -544,16 +544,17 @@ func.func @island_not_direct_parent_of_user() -> () {
tf_executor.yield %0 : tensor<i64>
}
// CHECK: "tf_device.launch"()
+ // CHECK-SAME: <{device = "/job:worker/replica:0/task:0/device:CPU:0"}>
// CHECK: "tf.OpC"(%[[VAL_0]]) : (tensor<i64>) -> ()
// CHECK: "tf.OpD"() : () -> ()
// CHECK: tf_device.return
- // CHECK: device = "/job:worker/replica:0/task:0/device:CPU:0"} : () -> ()
+ // CHECK: }) : () -> ()
%island2 = tf_executor.island {
"tf_device.launch"() ({
"tf_device.launch"() <{device = "/job:worker/replica:0/task:0/device:CPU:0"}> ({
"tf.OpC"(%island1#0) : (tensor<i64>) -> ()
"tf.OpD"() : () -> ()
tf_device.return
- }) {device = "/job:worker/replica:0/task:0/device:CPU:0"} : () -> ()
+ }) : () -> ()
tf_executor.yield
}
// CHECK: tf_executor.fetch
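
Because properties print between the operand list and the region, the FileCheck patterns shift as well: the device match becomes a CHECK-SAME on the tf_device.launch line, and the trailing-dictionary match is dropped. A minimal pattern for the new form, as a sketch:

// CHECK:      "tf_device.launch"()
// CHECK-SAME: <{device = "/job:worker/replica:0/task:0/device:CPU:0"}>
// CHECK:      tf_device.return
// CHECK:      }) : () -> ()
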
30 changes: 18 additions & 12 deletions tensorflow/compiler/mlir/tensorflow/tests/cluster_formation.mlir
@@ -10,14 +10,15 @@ module {
%2 = "tf.A"(%arg0) : (tensor<?xi32>) -> tensor<?xi32>

// CHECK: %[[TPU0_OUTPUT:[0-9]*]] = "tf_device.launch"
+ // CHECK-SAME: <{device = "tpu0"}>
// CHECK: %[[B_OUTPUT:[0-9]*]] = "tf.B"(%[[A_OUTPUT]]) : (tensor<?xi32>) -> tensor<?xi32>
%3 = "tf.B"(%2) {device = "tpu0"} : (tensor<?xi32>) -> tensor<?xi32>

// CHECK: %[[C_OUTPUT:[0-9]*]] = "tf.C"(%[[A_OUTPUT]], %[[B_OUTPUT]]) : (tensor<?xi32>, tensor<?xi32>) -> tensor<?xi32>
%4 = "tf.C"(%2, %3) {device = "tpu0"} : (tensor<?xi32>, tensor<?xi32>) -> tensor<?xi32>

// CHECK: tf_device.return %[[C_OUTPUT]]
- // CHECK: {device = "tpu0"} : () -> tensor<?xi32>
+ // CHECK: : () -> tensor<?xi32>

// CHECK: %[[D_OUTPUT:[0-9]*]] = "tf.D"(%[[TPU0_OUTPUT]])
%5 = "tf.D"(%4) : (tensor<?xi32>) -> tensor<?xi32>
@@ -40,14 +41,15 @@ module {
%2 = "tf.A"(%arg0) : (tensor<?xi32>) -> tensor<?xi32>

// CHECK: %[[TPU0_OUTPUT:[0-9]*]] = "tf_device.launch"
+ // CHECK-SAME: <{device = "tpu0"}>
// CHECK: %[[B_OUTPUT:[0-9]*]] = "tf.B"(%[[A_OUTPUT]]) : (tensor<?xi32>) -> tensor<?xi32>
%3 = "tf.B"(%2) {device = "tpu0"} : (tensor<?xi32>) -> tensor<?xi32>

// CHECK: %[[C_OUTPUT:[0-9]*]] = "tf.C"(%[[A_OUTPUT]], %[[B_OUTPUT]]) : (tensor<?xi32>, tensor<?xi32>) -> tensor<?xi32>
%4 = "tf.C"(%2, %3) {device = "tpu0"} : (tensor<?xi32>, tensor<?xi32>) -> tensor<?xi32>

// CHECK: tf_device.return %[[C_OUTPUT]]
- // CHECK: {device = "tpu0"} : () -> tensor<?xi32>
+ // CHECK: : () -> tensor<?xi32>

// CHECK: %[[D_OUTPUT:[0-9]*]] = "tf.D"(%[[TPU0_OUTPUT]])
%5 = "tf.D"(%4) : (tensor<?xi32>) -> tensor<?xi32>
@@ -71,14 +73,15 @@ module {
%1:2 = tf_executor.island {

// CHECK: %[[TPU0_OUTPUT:[0-9]*]] = "tf_device.launch"
+ // CHECK-SAME: <{device = "tpu0"}>
// CHECK: %[[A_OUTPUT:[0-9]*]] = "tf.A"(%[[ARG_0]]) : (tensor<?xi32>) -> tensor<?xi32>
%3 = "tf.A"(%arg0) {device = "tpu0"} : (tensor<?xi32>) -> tensor<?xi32>

// CHECK: %[[B_OUTPUT:[0-9]*]] = "tf.B"(%[[A_OUTPUT]], %[[ARG_0]]) : (tensor<?xi32>, tensor<?xi32>) -> tensor<?xi32>
%4 = "tf.B"(%3, %arg0) {device = "tpu0"} : (tensor<?xi32>, tensor<?xi32>) -> tensor<?xi32>

// CHECK: tf_device.return %[[B_OUTPUT]]
- // CHECK: {device = "tpu0"} : () -> tensor<?xi32>
+ // CHECK: : () -> tensor<?xi32>

// CHECK: %[[C_OUTPUT:[0-9]*]] = "tf.C"(%[[TPU0_OUTPUT]])
%5 = "tf.C"(%4) : (tensor<?xi32>) -> tensor<?xi32>
@@ -104,14 +107,15 @@ module {

%2:2 = tf_executor.island {
// CHECK: %[[TPU0_OUTPUT:[0-9]*]] = "tf_device.launch"
+ // CHECK: <{device = "tpu0"}>
// CHECK: %[[A_OUTPUT:[0-9]*]] = "tf.A"(%[[ARG_0]]) : (tensor<?xi32>) -> tensor<?xi32>
%3 = "tf.A"(%arg0) {device = "tpu0"} : (tensor<?xi32>) -> tensor<?xi32>

// CHECK: %[[B_OUTPUT:[0-9]*]] = "tf.B"(%[[A_OUTPUT]], %[[OTHER_ISLAND_OUTPUT]]) : (tensor<?xi32>, tensor<?xi32>) -> tensor<?xi32>
%4 = "tf.B"(%3, %1#0) {device = "tpu0"} : (tensor<?xi32>, tensor<?xi32>) -> tensor<?xi32>

// CHECK: tf_device.return %[[B_OUTPUT]]
- // CHECK: {device = "tpu0"} : () -> tensor<?xi32>
+ // CHECK: : () -> tensor<?xi32>

// CHECK: %[[C_OUTPUT:[0-9]*]] = "tf.C"(%[[TPU0_OUTPUT]])
%5 = "tf.C"(%4) : (tensor<?xi32>) -> tensor<?xi32>
@@ -135,11 +139,12 @@ module {
%1:2 = tf_executor.island {

// CHECK: %[[TPU0_OUTPUT:[0-9]*]] = "tf_device.launch"
+ // CHECK: <{device = "tpu0"}>
// CHECK: %[[A_OUTPUT:[0-9]*]] = "tf.A"() : () -> tensor<?xi32>
%3 = "tf.A"() {device = "tpu0"} : () -> tensor<?xi32>

// CHECK: tf_device.return %[[A_OUTPUT]]
- // CHECK: {device = "tpu0"} : () -> tensor<?xi32>
+ // CHECK: : () -> tensor<?xi32>

// CHECK: %[[B_OUTPUT:[0-9]*]] = "tf.B"(%[[TPU0_OUTPUT]])
%4 = "tf.B"(%3) : (tensor<?xi32>) -> tensor<?xi32>
@@ -166,14 +171,15 @@ module {
%2 = "tf.A"(%arg0) : (tensor<?xi32>) -> tensor<?xi32>

// CHECK: %[[TPU0_OUTPUT:[0-9]*]] = "tf_device.launch"
+ // CHECK: <{device = "tpu0"}>
// CHECK: %[[B_OUTPUT:[0-9]*]] = "tf.B"(%[[A_OUTPUT]]) : (tensor<?xi32>) -> tensor<?xi32>
%3 = "tf.B"(%2) {device = "tpu0"} : (tensor<?xi32>) -> tensor<?xi32>

// CHECK: %[[C_OUTPUT:[0-9]*]] = "tf.C"(%[[A_OUTPUT]], %[[B_OUTPUT]]) : (tensor<?xi32>, tensor<?xi32>) -> tensor<?xi32>
%4 = "tf.C"(%2, %3) {device = "tpu0"} : (tensor<?xi32>, tensor<?xi32>) -> tensor<?xi32>

// CHECK: tf_device.return %[[C_OUTPUT]]
- // CHECK: {device = "tpu0"} : () -> tensor<?xi32>
+ // CHECK: : () -> tensor<?xi32>

// CHECK: %[[GPU0_OUTPUT:[0-9]*]] = "tf_device.launch"
// CHECK: %[[D_OUTPUT:[0-9]*]] = "tf.D"(%[[TPU0_OUTPUT]]) : (tensor<?xi32>) -> tensor<?xi32>
@@ -204,14 +210,15 @@ module {
%2 = "tf.A"(%arg0) : (tensor<?xi32>) -> tensor<?xi32>

// CHECK: %[[TPU0_OUTPUT:[0-9]*]] = "tf_device.launch"
+ // CHECK: <{device = "tpu0"}>
// CHECK: %[[B_OUTPUT:[0-9]*]] = "tf.B"(%[[A_OUTPUT]]) : (tensor<?xi32>) -> tensor<?xi32>
%3 = "tf.B"(%2) {device = "tpu0"} : (tensor<?xi32>) -> tensor<?xi32>

// CHECK: %[[C_OUTPUT:[0-9]*]] = "tf.C"(%[[A_OUTPUT]], %[[B_OUTPUT]]) : (tensor<?xi32>, tensor<?xi32>) -> tensor<?xi32>
%4 = "tf.C"(%2, %3) {device = "tpu0"} : (tensor<?xi32>, tensor<?xi32>) -> tensor<?xi32>

// CHECK: tf_device.return %[[C_OUTPUT]]
- // CHECK: {device = "tpu0"} : () -> tensor<?xi32>
+ // CHECK: : () -> tensor<?xi32>

// CHECK: %[[GPU0_OUTPUT:[0-9]*]] = "tf_device.launch"
// CHECK: %[[D_OUTPUT:[0-9]*]] = "tf.D"(%[[A_OUTPUT]]) : (tensor<?xi32>) -> tensor<?xi32>
@@ -248,6 +255,7 @@ module {
// CHECK: %[[C_OUTPUT:[0-9]*]] = "tf.C"(%[[ARG_0]])

// CHECK: %[[TPU0_OUTPUT:[0-9]*]] = "tf_device.launch"
+ // CHECK: <{device = "tpu0"}>
// CHECK: %[[B_OUTPUT:[0-9]*]] = "tf.B"(%[[A_OUTPUT]]) : (tensor<?xi32>) -> tensor<?xi32>
%3 = "tf.B"(%2) {device = "tpu0"} : (tensor<?xi32>) -> tensor<?xi32>

Expand All @@ -257,7 +265,6 @@ module {
%5 = "tf.D"(%2, %3) {device = "tpu0"} : (tensor<?xi32>, tensor<?xi32>) -> tensor<?xi32>

// CHECK: tf_device.return %[[D_OUTPUT]]
- // CHECK: {device = "tpu0"} : () -> tensor<?xi32>

// CHECK: %[[E_OUTPUT:[0-9]*]] = "tf.E"(%[[C_OUTPUT]], %[[TPU0_OUTPUT]]) : (tensor<?xi32>, tensor<?xi32>) -> tensor<?xi32>
%6 = "tf.E"(%4, %5) : (tensor<?xi32>, tensor<?xi32>) -> tensor<?xi32>
@@ -296,12 +303,11 @@ module {
%4 = "tf.C"(%arg0) : (tensor<?xi32>) -> tensor<?xi32>

// CHECK: %[[TPU0_OUTPUT1:[0-9]*]] = "tf_device.launch"
+ // CHECK: <{device = "tpu0"}>
// CHECK: %[[D_OUTPUT:[0-9]*]] = "tf.D"(%[[A_OUTPUT]], %[[TPU0_OUTPUT0]]) : (tensor<?xi32>, tensor<?xi32>) -> tensor<?xi32>
// CHECK: tf_device.return %[[D_OUTPUT]]
%5 = "tf.D"(%2, %3) {device = "tpu0"} : (tensor<?xi32>, tensor<?xi32>) -> tensor<?xi32>

- // CHECK: {device = "tpu0"} : () -> tensor<?xi32>

// CHECK: %[[E_OUTPUT:[0-9]*]] = "tf.E"(%[[C_OUTPUT]], %[[TPU0_OUTPUT1]]) : (tensor<?xi32>, tensor<?xi32>) -> tensor<?xi32>
%6 = "tf.E"(%4, %5) : (tensor<?xi32>, tensor<?xi32>) -> tensor<?xi32>

@@ -358,11 +364,12 @@ module {
%2 = "tf.A"(%arg0) : (tensor<?xi32>) -> tensor<?xi32>

// CHECK: %[[GPU0_OUTPUT:[0-9]*]] = "tf_device.launch"
+ // CHECK: <{device = "gpu0"}>
// CHECK: %[[C_OUTPUT:[0-9]*]] = "tf.C"(%[[ARG_0]])
// CHECK: tf_device.return %[[C_OUTPUT]]
- // CHECK: {device = "gpu0"} : () -> tensor<?xi32>

// CHECK: %[[TPU0_OUTPUT:[0-9]*]] = "tf_device.launch"
+ // CHECK: <{device = "tpu0"}>
// CHECK: %[[B_OUTPUT:[0-9]*]] = "tf.B"(%[[A_OUTPUT]]) : (tensor<?xi32>) -> tensor<?xi32>
%3 = "tf.B"(%2) {device = "tpu0"} : (tensor<?xi32>) -> tensor<?xi32>

Expand All @@ -372,7 +379,6 @@ module {
%5 = "tf.D"(%2, %3) {device = "tpu0"} : (tensor<?xi32>, tensor<?xi32>) -> tensor<?xi32>

// CHECK: tf_device.return %[[D_OUTPUT]]
- // CHECK: {device = "tpu0"} : () -> tensor<?xi32>

// CHECK: %[[E_OUTPUT:[0-9]*]] = "tf.E"(%[[GPU0_OUTPUT]], %[[TPU0_OUTPUT]]) : (tensor<?xi32>, tensor<?xi32>) -> tensor<?xi32>
%6 = "tf.E"(%4, %5) : (tensor<?xi32>, tensor<?xi32>) -> tensor<?xi32>
@@ -10,7 +10,7 @@ func.func @single_cluster(%arg0: tensor<?xi32>) -> tensor<?xi32> {
// CHECK: %[[A_OUTPUT:[0-9]*]] = "tf.A"(%[[ARG_0]])
%2 = "tf.A"(%arg0) : (tensor<?xi32>) -> tensor<?xi32>

- // CHECK: %[[CLUSTER_OUTPUT:[0-9]*]] = "tf_device.cluster_func"(%[[A_OUTPUT]]) {func = @[[CLUSTER:.*]]}
+ // CHECK: %[[CLUSTER_OUTPUT:[0-9]*]] = "tf_device.cluster_func"(%[[A_OUTPUT]]) <{func = @[[CLUSTER:.*]]}>
%3 = "tf_device.cluster"() ({
%4 = "tf.B"(%2) : (tensor<?xi32>) -> tensor<?xi32>
tf_device.return %4 : tensor<?xi32>
@@ -42,7 +42,7 @@ func.func @multiple_clusters(%arg0: tensor<?xi32>) -> tensor<?xi32> {
// CHECK: %[[A_OUTPUT:[0-9]*]] = "tf.A"(%[[ARG_0]])
%2 = "tf.A"(%arg0) : (tensor<?xi32>) -> tensor<?xi32>

- // CHECK: %[[CLUSTER_0_OUTPUT:[0-9]*]] = "tf_device.cluster_func"(%[[A_OUTPUT]]) {func = @[[CLUSTER_0:.*]]}
+ // CHECK: %[[CLUSTER_0_OUTPUT:[0-9]*]] = "tf_device.cluster_func"(%[[A_OUTPUT]]) <{func = @[[CLUSTER_0:.*]]}>
%3 = "tf_device.cluster"() ({
%6 = "tf.B"(%2) : (tensor<?xi32>) -> tensor<?xi32>
tf_device.return %6 : tensor<?xi32>
@@ -51,7 +51,7 @@ func.func @multiple_clusters(%arg0: tensor<?xi32>) -> tensor<?xi32> {
// CHECK: %[[D_OUTPUT:[0-9]*]] = "tf.D"(%[[CLUSTER_0_OUTPUT]])
%4 = "tf.D"(%3) : (tensor<?xi32>) -> tensor<?xi32>

- // CHECK: %[[CLUSTER_1_OUTPUT:[0-9]*]] = "tf_device.cluster_func"(%[[CLUSTER_0_OUTPUT]], %[[D_OUTPUT]]) {func = @[[CLUSTER_1:.*]]}
+ // CHECK: %[[CLUSTER_1_OUTPUT:[0-9]*]] = "tf_device.cluster_func"(%[[CLUSTER_0_OUTPUT]], %[[D_OUTPUT]]) <{func = @[[CLUSTER_1:.*]]}>
%5 = "tf_device.cluster"() ({
%6 = "tf.E"(%3) : (tensor<?xi32>) -> tensor<?xi32>
%7 = "tf.F"(%4, %6) : (tensor<?xi32>, tensor<?xi32>) -> tensor<?xi32>
@@ -86,7 +86,7 @@ func.func @multiple_clusters(%arg0: tensor<?xi32>) -> tensor<?xi32> {
func.func @cluster_operands(%arg0: tensor<?xi32>) -> tensor<?xi32> {
%0 = tf_executor.graph {
%1:2 = tf_executor.island wraps
- // CHECK: %[[CLUSTER_OUTPUT:[a-z0-9]*]], %{{.*}} = {{.*}} "tf_device.cluster_func"() {func = @[[CLUSTER:.*]]}
+ // CHECK: %[[CLUSTER_OUTPUT:[a-z0-9]*]], %{{.*}} = {{.*}} "tf_device.cluster_func"() <{func = @[[CLUSTER:.*]]}>
"tf_device.cluster"() ({
%3 = "tf.A"() : () -> tensor<?xi32>
tf_device.return %3 : tensor<?xi32>
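
The func symbol on tf_device.cluster_func is likewise an inherent attribute, so it moves into the inline property block and the patterns above now match <{func = @...}>. A sketch of the new generic form, assuming the outlined function @cluster is defined elsewhere:

func.func @cluster_func_sketch(%in: tensor<?xi32>) -> tensor<?xi32> {
  %out = "tf_device.cluster_func"(%in) <{func = @cluster}> : (tensor<?xi32>) -> tensor<?xi32>
  func.return %out : tensor<?xi32>
}
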
@@ -4,10 +4,10 @@
// CHECK-LABEL: func @single_op_launch
func.func @single_op_launch() {
// CHECK: "tf_device.launch"
+ // CHECK: device = "CPU:0"
// CHECK: "tf.opA"
// CHECK-NOT: device
// CHECK: tf_device.return
- // CHECK: device = "CPU:0"
"tf.opA"() {device = "CPU:0"} : () -> tensor<i1>
func.return
}
@@ -16,10 +16,10 @@ func.func @single_op_launch() {
// CHECK-LABEL: func @launch_return
func.func @launch_return() -> tensor<i1> {
// CHECK: %[[LAUNCH_OUT:.*]] = "tf_device.launch"
+ // CHECK: device = "CPU:0"
// CHECK: %[[A_OUT:.*]] = "tf.opA"
// CHECK-NOT: device
// CHECK: tf_device.return %[[A_OUT]]
- // CHECK: device = "CPU:0"
// CHECK: return %[[LAUNCH_OUT]]
%a = "tf.opA"() {device = "CPU:0"} : () -> tensor<i1>
func.return %a : tensor<i1>
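
For launches that yield results, the device check moves ahead of the op body for the same reason: the property prints before the region instead of after tf_device.return. A sketch of the printed form these tests now expect:

func.func @launch_return_sketch() -> tensor<i1> {
  %a = "tf_device.launch"() <{device = "CPU:0"}> ({
    %0 = "tf.opA"() : () -> tensor<i1>
    tf_device.return %0 : tensor<i1>
  }) : () -> tensor<i1>
  func.return %a : tensor<i1>
}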