From f224f5b20ef2ae3f0c2d1dd60ca4168896e10f3d Mon Sep 17 00:00:00 2001 From: lambda7xx Date: Fri, 18 Aug 2023 11:20:53 +0000 Subject: [PATCH 01/13] start to add ffi for op-attrs library --- lib/op-attrs/ffi/src/op-attrs.cc | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/lib/op-attrs/ffi/src/op-attrs.cc b/lib/op-attrs/ffi/src/op-attrs.cc index 828574dc25..69ff3cab80 100644 --- a/lib/op-attrs/ffi/src/op-attrs.cc +++ b/lib/op-attrs/ffi/src/op-attrs.cc @@ -5,10 +5,31 @@ #include "op-attrs/op.h" #include "op-attrs/ops/embedding.h" #include "utils/bidict.h" +#include "utils/exception.h" flexflow_utils_exception_t make_opattrs_exception(flexflow_opattrs_error_code_t); +flexflow_error_t flexflow_opattrs_error_wrap(flexflow_opattrs_error_t e) { + return flexflow_error_wrap(FLEXFLOW_ERROR_SOURCE_OPATTRS, *unwrap_opaque(e)); +} + +flexflow_error_t flexflow_opattrs_error_unwrap( + flexflow_error_t err flexflow_opattrs_error_t *out) { + return flexflow_error_unwrap(err, FLEXFLOW_ERROR_SOURCE_OPATTRS, out); +} + +flexflow_error_t flexflow_opattrs_error_is_ok(flexflow_opattrs_error_t err, + bool *out) { + *out = false; + return status_ok(); +} + +flexflow_error_t flexflow_opattrs_error_get_string(flexflow_opattrs_error_t err, + char **m_out) { + NOT_IMPLEMENTED(); // TODO(lambda) +} + REGISTER_FFI_ENUM(flexflow_param_sync_t, ParamSync, FLEXFLOW_OPATTRS_ERROR_CODE_INVALID_PARAM_SYNC_VALUE, From 565b062c0577b7c6351e716882a1b60280849d98 Mon Sep 17 00:00:00 2001 From: lambda7xx Date: Sat, 19 Aug 2023 12:52:59 +0000 Subject: [PATCH 02/13] add FF_NEW_OPAQUE_TYPE in lib/op-attrs/ffi/include/flexflow/op-attrs.h --- lib/op-attrs/ffi/include/flexflow/op-attrs.h | 45 ++++++++++++++++++++ 1 file changed, 45 insertions(+) diff --git a/lib/op-attrs/ffi/include/flexflow/op-attrs.h b/lib/op-attrs/ffi/include/flexflow/op-attrs.h index 08b8e26f83..30444c673e 100644 --- a/lib/op-attrs/ffi/include/flexflow/op-attrs.h +++ b/lib/op-attrs/ffi/include/flexflow/op-attrs.h @@ -28,6 +28,51 @@ flexflow_error_t flexflow_opattrs_error_destroy(flexflow_opattrs_error_t); // FF_NEW_OPAQUE_TYPE(flexflow_regularizer_attrs_t); +FF_NEW_OPAQUE_TYPE(flexflow_dim_ordered_t); +FF_NEW_OPAQUE_TYPE(flexflow_ff_dim_t); +FF_NEW_OPAQUE_TYPE(flexflow_parallel_dim_t); +FF_NEW_OPAQUE_TYPE(flexflow_paallel_tensor_dims_t); +FF_NEW_OPAQUE_TYPE(flexflow_parallel_tensor_shape_t); +FF_NEW_OPAQUE_TYPE(flexflow_tensor_shape_t); + +//ops +FF_NEW_OPAQUE_TYPE(flexflow_aggregae_specattrs_t); +FF_NEW_OPAQUE_TYPE(flexflow_aggregate_t); +FF_NEW_OPAQUE_TYPE(flexflow_multihead_attentionattrs_t); +FF_NEW_OPAQUE_TYPE(flexflow_multihead_attentioninputs_t); +FF_NEW_OPAQUE_TYPE(flexflow_batchmatmul_attrs_t); +FF_NEW_OPAQUE_TYPE(flexflow_batchnorm_attrs_t); +FF_NEW_OPAQUE_TYPE(flexflow_broadcast_attrs_t); +FF_NEW_OPAQUE_TYPE(flexflow_cast_attrs_t); +FF_NEW_OPAQUE_TYPE(flexflow_combine_attrs_t); +FF_NEW_OPAQUE_TYPE(flexflow_concat_attrs_t); +FF_NEW_OPAQUE_TYPE(flexflow_conv2d_attrs_t); +FF_NEW_OPAQUE_TYPE(flexflow_dropout_attrs_t); +FF_NEW_OPAQUE_TYPE(flexflow_element_binary_attrs_t); +FF_NEW_OPAQUE_TYPE(flexflow_element_unary_attrs_t); +FF_NEW_OPAQUE_TYPE(flexflow_embedding_attrs_t); +FF_NEW_OPAQUE_TYPE(flexflow_flat_attrs_t); +FF_NEW_OPAQUE_TYPE(flexflow_gather_attrs_t); +FF_NEW_OPAQUE_TYPE(flexflow_group_by_attrs_t); +FF_NEW_OPAQUE_TYPE(flexflow_input_attrs); +FF_NEW_OPAQUE_TYPE(flexflow_layernorm_attrs_t); +FF_NEW_OPAQUE_TYPE(flexflow_l1_regularizer_attrs_t); +FF_NEW_OPAQUE_TYPE(flexflow_l2_regularizer_attrs_t); 
+FF_NEW_OPAQUE_TYPE(flexflow_linear_attrs_t);
+FF_NEW_OPAQUE_TYPE(flexflow_sparse_categorical_crossentropy_loss_attrs_t);
+FF_NEW_OPAQUE_TYPE(flexflow_other_loss_attrs_t);
+FF_NEW_OPAQUE_TYPE(flexflow_noop_attrs_t);
+FF_NEW_OPAQUE_TYPE(flexflow_pool2d_attrs_t);
+FF_NEW_OPAQUE_TYPE(flexflow_reduce_attrs_t);
+FF_NEW_OPAQUE_TYPE(flexflow_reduction_attrs_t);
+FF_NEW_OPAQUE_TYPE(flexflow_repartition_attrs_t);
+FF_NEW_OPAQUE_TYPE(flexflow_replicate_attrs_t);
+FF_NEW_OPAQUE_TYPE(flexflow_reshape_attrs_t);
+FF_NEW_OPAQUE_TYPE(flexflow_reverse_attrs_t);
+FF_NEW_OPAQUE_TYPE(flexflow_softmax_attrs_t);
+FF_NEW_OPAQUE_TYPE(flexflow_split_attrs_t);
+FF_NEW_OPAQUE_TYPE(flexflow_topk_attrs_t);
+FF_NEW_OPAQUE_TYPE(flexflow_transpose_attrs_t);

 typedef enum {
   FLEXFLOW_DATATYPE_BOOL,

From 6f682de15724e28224c6364a92f95052eef1d98e Mon Sep 17 00:00:00 2001
From: lambda7xx
Date: Sat, 19 Aug 2023 13:18:20 +0000
Subject: [PATCH 03/13] add REGISTER_OPAQUE

---
 lib/op-attrs/ffi/include/flexflow/op-attrs.h  |  4 +-
 lib/op-attrs/ffi/internal/internal/op-attrs.h | 44 +++++++++++++++++++
 2 files changed, 46 insertions(+), 2 deletions(-)

diff --git a/lib/op-attrs/ffi/include/flexflow/op-attrs.h b/lib/op-attrs/ffi/include/flexflow/op-attrs.h
index 30444c673e..fbcf225054 100644
--- a/lib/op-attrs/ffi/include/flexflow/op-attrs.h
+++ b/lib/op-attrs/ffi/include/flexflow/op-attrs.h
@@ -48,13 +48,13 @@ FF_NEW_OPAQUE_TYPE(flexflow_combine_attrs_t);
 FF_NEW_OPAQUE_TYPE(flexflow_concat_attrs_t);
 FF_NEW_OPAQUE_TYPE(flexflow_conv2d_attrs_t);
 FF_NEW_OPAQUE_TYPE(flexflow_dropout_attrs_t);
-FF_NEW_OPAQUE_TYPE(flexflow_element_binary_attrs_t);
+FF_NEW_OPAQUE_TYPE(flexflow_element_scalar_unary_attrs_t);
 FF_NEW_OPAQUE_TYPE(flexflow_element_unary_attrs_t);
 FF_NEW_OPAQUE_TYPE(flexflow_embedding_attrs_t);
 FF_NEW_OPAQUE_TYPE(flexflow_flat_attrs_t);
 FF_NEW_OPAQUE_TYPE(flexflow_gather_attrs_t);
 FF_NEW_OPAQUE_TYPE(flexflow_group_by_attrs_t);
-FF_NEW_OPAQUE_TYPE(flexflow_input_attrs);
+FF_NEW_OPAQUE_TYPE(flexflow_input_attrs_t);
 FF_NEW_OPAQUE_TYPE(flexflow_layernorm_attrs_t);
 FF_NEW_OPAQUE_TYPE(flexflow_l1_regularizer_attrs_t);
 FF_NEW_OPAQUE_TYPE(flexflow_l2_regularizer_attrs_t);
diff --git a/lib/op-attrs/ffi/internal/internal/op-attrs.h b/lib/op-attrs/ffi/internal/internal/op-attrs.h
index df0d6ce61c..aabb0b71c2 100644
--- a/lib/op-attrs/ffi/internal/internal/op-attrs.h
+++ b/lib/op-attrs/ffi/internal/internal/op-attrs.h
@@ -14,6 +14,50 @@
 using namespace FlexFlow;

 REGISTER_OPAQUE(flexflow_regularizer_attrs_t, optional<RegularizerAttrs>);
+REGISTER_OPAQUE(flexflow_ff_dim_t, ff_dim_t);
+REGISTER_OPAQUE(flexflow_dim_ordered_t, DimOrdered);
+REGISTER_OPAQUE(flexflow_parallel_dim_t, ParallelDim);
+REGISTER_OPAQUE(flexflow_parallel_tensor_dims_t, ParallelTensorDims);
+REGISTER_OPAQUE(flexflow_parallel_tensor_shape_t, ParallelTensorShape);
+REGISTER_OPAQUE(flexflow_tensor_shape_t, TensorShape);
+
+//ops
+REGISTER_OPAQUE(flexflow_aggregae_specattrs_t, AggregateSpecAttrs);
+REGISTER_OPAQUE(flexflow_aggregate_t, Aggregate);
+REGISTER_OPAQUE(flexflow_multihead_attentionattrs_t, MultiHeadAttentionAttrs);
+REGISTER_OPAQUE(flexflow_multihead_attentioninputs_t, ultiHeadAttentionInputs);
+REGISTER_OPAQUE(flexflow_batchmatmul_attrs_t, BatchMatmulAttrs);
+REGISTER_OPAQUE(flexflow_batchnorm_attrs_t, BatchNormAttrs);
+REGISTER_OPAQUE(flexflow_broadcast_attrs_t, BroadcastAttrs);
+REGISTER_OPAQUE(flexflow_cast_attrs_t, CastAttrs);
+REGISTER_OPAQUE(flexflow_combine_attrs_t, CombineAttrs);
+REGISTER_OPAQUE(flexflow_concat_attrs_t, ConcatAttrs);
+REGISTER_OPAQUE(flexflow_conv2d_attrs_t, Conv2DAttrs);
+REGISTER_OPAQUE(flexflow_dropout_attrs_t, DropoutAttrs);
+REGISTER_OPAQUE(flexflow_element_scalar_unary_attrs_t, ElementScalarUnaryAttrs);
+REGISTER_OPAQUE(flexflow_element_unary_attrs_t, ElementUnaryAttrs);
+REGISTER_OPAQUE(flexflow_embedding_attrs_t, EmbeddingAttrs);
+REGISTER_OPAQUE(flexflow_flat_attrs_t, FlatAttrs);
+REGISTER_OPAQUE(flexflow_gather_attrs_t, GatherAttrs);
+REGISTER_OPAQUE(flexflow_group_by_attrs_t, GroupByAttrs);
+REGISTER_OPAQUE(flexflow_input_attrs_t, InputAttrs);
+REGISTER_OPAQUE(flexflow_layernorm_attrs_t, LayerNormAttrs);
+REGISTER_OPAQUE(flexflow_l1_regularizer_attrs_t, L1RegularizerAttrs);
+REGISTER_OPAQUE(flexflow_l2_regularizer_attrs_t, L2RegularizerAttrs);
+REGISTER_OPAQUE(flexflow_linear_attrs_t, LinearAttrs);
+REGISTER_OPAQUE(flexflow_sparse_categorical_crossentropy_loss_attrs_t, SparseCategoricalCrossEntropyLossAttrs);
+REGISTER_OPAQUE(flexflow_other_loss_attrs_t, OtherLossAttrs);
+REGISTER_OPAQUE(flexflow_noop_attrs_t, NoopAttrs);
+REGISTER_OPAQUE(flexflow_pool2d_attrs_t, Pool2DAttrs);
+REGISTER_OPAQUE(flexflow_reduce_attrs_t, ReduceAttrs);
+REGISTER_OPAQUE(flexflow_reduction_attrs_t, ReductionAttrs);
+REGISTER_OPAQUE(flexflow_repartition_attrs_t, RepartitionAttrs);
+REGISTER_OPAQUE(flexflow_replicate_attrs_t, ReplicateAttrs);
+REGISTER_OPAQUE(flexflow_reshape_attrs_t, ReshapeAttrs);
+REGISTER_OPAQUE(flexflow_reverse_attrs_t, ReverseAttrs);
+REGISTER_OPAQUE(flexflow_softmax_attrs_t, SoftmaxAttrs);
+REGISTER_OPAQUE(flexflow_split_attrs_t, SplitAttrs);
+REGISTER_OPAQUE(flexflow_topk_attrs_t, TopKAttrs);

 optional<ParamSync> to_internal(flexflow_param_sync_t);
 flexflow_param_sync_t to_external(optional<ParamSync>);

From 16ca2376c2f08cd55a6797e14fd5303110b38ff2 Mon Sep 17 00:00:00 2001
From: lambda7xx
Date: Sat, 19 Aug 2023 13:40:09 +0000
Subject: [PATCH 04/13] implement the first part of the ffi for op-attrs

---
 lib/op-attrs/ffi/include/flexflow/op-attrs.h |  2 +
 lib/op-attrs/ffi/src/op-attrs.cc             | 42 +++++++++++++++++++-
 2 files changed, 42 insertions(+), 2 deletions(-)
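
Note: the intended C-side call pattern for the error helpers implemented in
this patch is roughly the following sketch (error checking elided; `err`
stands for a flexflow_opattrs_error_t obtained from some earlier call).
Since the message is produced with strdup, the caller owns it and frees it:

    char *msg = NULL;
    flexflow_opattrs_error_get_string(err, &msg);
    fprintf(stderr, "opattrs error: %s\n", msg);
    free(msg);
    flexflow_opattrs_error_destroy(err);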

diff --git a/lib/op-attrs/ffi/include/flexflow/op-attrs.h b/lib/op-attrs/ffi/include/flexflow/op-attrs.h
index fbcf225054..924d51eedc 100644
--- a/lib/op-attrs/ffi/include/flexflow/op-attrs.h
+++ b/lib/op-attrs/ffi/include/flexflow/op-attrs.h
@@ -23,6 +23,8 @@ flexflow_error_t flexflow_opattrs_error_unwrap(flexflow_error_t,
 flexflow_error_t flexflow_opattrs_error_is_ok(flexflow_opattrs_error_t, bool *);
 flexflow_error_t flexflow_opattrs_error_get_string(flexflow_opattrs_error_t,
                                                    char **);
+flexflow_error_t flexflow_opattrs_error_get_error_code(flexflow_opattrs_error_t,
+                                                       flexflow_opattrs_error_code_t *);
 flexflow_error_t flexflow_opattrs_error_destroy(flexflow_opattrs_error_t);

 //
diff --git a/lib/op-attrs/ffi/src/op-attrs.cc b/lib/op-attrs/ffi/src/op-attrs.cc
index 69ff3cab80..a393e61ccf 100644
--- a/lib/op-attrs/ffi/src/op-attrs.cc
+++ b/lib/op-attrs/ffi/src/op-attrs.cc
@@ -15,7 +15,7 @@ flexflow_error_t flexflow_opattrs_error_wrap(flexflow_opattrs_error_t e) {
 }

 flexflow_error_t flexflow_opattrs_error_unwrap(
-    flexflow_error_t err flexflow_opattrs_error_t *out) {
+    flexflow_error_t err ,flexflow_opattrs_error_t *out) {
   return flexflow_error_unwrap(err, FLEXFLOW_ERROR_SOURCE_OPATTRS, out);
 }

@@ -27,7 +27,45 @@ flexflow_error_t flexflow_opattrs_error_is_ok(flexflow_opattrs_error_t err,

 flexflow_error_t flexflow_opattrs_error_get_string(flexflow_opattrs_error_t err,
                                                    char **m_out) {
-  NOT_IMPLEMENTED(); // TODO(lambda)
+  flexflow_opattrs_error_code_t err_code;
+  flexflow_opattrs_error_get_error_code(err, &err_code);
+  auto out = const_cast<char**>(m_out);
+  switch (err_code) {
+    case FLEXFLOW_OPATTRS_ERROR_CODE_INVALID_PARAM_SYNC_VALUE:
+      *out = strdup("Invalid param sync value");
+      break;
+    case FLEXFLOW_OPATTRS_ERROR_CODE_INVALID_DATATYPE_VALUE:
+      *out = strdup("Invalid datatype value");
+      break;
+    case FLEXFLOW_OPATTRS_ERROR_CODE_INVALID_ACTIVATION_VALUE:
+      *out = strdup("Invalid activation value");
+      break;
+    case FLEXFLOW_OPATTRS_ERROR_CODE_INVALID_POOL_OP_VALUE:
+      *out = strdup("Invalid pool op value");
+      break;
+    case FLEXFLOW_OPATTRS_ERROR_CODE_INVALID_AGGREGATE_OP_VALUE:
+      *out = strdup("Invalid aggregate op value");
+      break;
+    case FLEXFLOW_OPATTRS_ERROR_CODE_INVALID_OP_TYPE_VALUE:
+      *out = strdup("Invalid op type value");
+      break;
+    default:
+      *out = strdup("Unknown error");
+  }
+  return status_ok();
+}
+
+flexflow_error_t flexflow_opattrs_error_get_error_code(flexflow_opattrs_error_t err,
+    flexflow_opattrs_error_code_t * out) {
+  flexflow_opattrs_error_t opaque;
+  RAISE_FLEXFLOW(flexflow_opattrs_error_unwrap(err, &opaque));
+  internal_flexflow_opattrs_error_t const *unwrapped = unwrap_opaque(opaque);
+  *out = unwrapped->code;
+  return status_ok();
+}
+
+flexflow_error_t flexflow_opattrs_error_destroy(flexflow_opattrs_error_t err){
+  return status_ok();// Note(lambda): this follows https://github.com/lockshaw/FlexFlow/blob/expanded-ffi/lib/pcg/ffi/src/pcg.cc#L71-#L72 // return flexflow_error_destroy(err);
 }

 REGISTER_FFI_ENUM(flexflow_param_sync_t,

From b56074306670411a487edab102aab7922ccb8ba1 Mon Sep 17 00:00:00 2001
From: lambda7xx
Date: Sun, 20 Aug 2023 01:32:27 +0000
Subject: [PATCH 05/13] add functions in op-attrs.h

---
 lib/op-attrs/ffi/include/flexflow/op-attrs.h  | 12 +-
 lib/op-attrs/ffi/internal/internal/op-attrs.h | 73 +++++++++++++++++--
 2 files changed, 75 insertions(+), 10 deletions(-)
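
Note: every accessor declared in this patch follows the same shape: C++
arguments become opaque handles, the C++ return value becomes an `*out`
parameter, and the return slot carries the status. A sketch of the intended
usage, taking flexflow_get_piece_shape as the example (the parallel shape
handle is assumed to come from elsewhere):

    flexflow_parallel_tensor_shape_t parallel_shape = /* obtained elsewhere */;
    flexflow_tensor_shape_t piece_shape;
    flexflow_error_t err = flexflow_get_piece_shape(parallel_shape, &piece_shape);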
"flexflow/op-attrs.h" +#include "flexflow/utils.h" #include "internal/opaque.h" #include "op-attrs/activation.h" #include "op-attrs/datatype.h" #include "op-attrs/op.h" #include "op-attrs/ops/embedding.h" +#include "op-attrs/ops/aggregate_spec.h" +#include "op-attrs/ops/aggreagate.h" +#include "op-attrs/ops/attention.h" +#include "op-attrs/ops/batch_matmul.h" +#include "op-attrs/ops/batch_norm.h" +#include "op-attrs/ops/broadcast.h" +#include "op-attrs/ops/cast.h" +#include "op-attrs/ops/combine.h" +#include "op-attrs/ops/concat.h" +#include "op-attrs/ops/conv2d.h" +#include "op-attrs/ops/dropout.h" +#include "op-attrs/ops/element_binary.h" +#include "op-attrs/ops/element_unary.h" +#include "op-attrs/ops/flat.h" +#include "op-attrs/ops/gather.h" +#include "op-attrs/ops/group_by.h" +#include "op-attrs/ops/input.h" +#include "op-attrs/ops/layer_norm.h" #include "op-attrs/ops/linear.h" +#include "op-attrs/ops/loss_function.h" +#include "op-attrs/ops/noop.h" #include "op-attrs/ops/pool_2d.h" +#include "op-attrs/ops/reduce.h" +#include "op-attrs/ops/reduction.h" +#include "op-attrs/ops/repartition.h" +#include "op-attrs/ops/replicate.h" +#include "op-attrs/ops/reshape.h" +#include "op-attrs/ops/reverse.h" +#include "op-attrs/ops/softmax.h" +#include "op-attrs/ops/split.h" +#include "op-attrs/ops/topk.h" +#include "op-attrs/ops/transpose.h" #include "op-attrs/param_sync.h" +#include "op-attrs/dim_ordered.h" +#include "op-attrs/ff_dim.h" +#include "op-attrs/parallel_dim.h" +#include "op-attrs/parallel_tensor_shape.h" +#include "op-attrs/parallel_tensor_dims.h" using namespace FlexFlow; REGISTER_OPAQUE(flexflow_regularizer_attrs_t, optional); REGISTER_OPAQUE(flexflow_ff_dim_t, ff_dim_t); -REGISTER_OPAQUE(flexflow_dim_ordered_t, DimOrdered); +//REGISTER_OPAQUE(flexflow_dim_ordered_t, DimOrdered); REGISTER_OPAQUE(flexflow_parallel_dim_t, ParallelDim); REGISTER_OPAQUE(flexflow_parallel_tensor_dims_t, ParallelTensorDims); REGISTER_OPAQUE(flexflow_parallel_tensor_shape_t, ParallelTensorShape); REGISTER_OPAQUE(flexflow_tensor_shape_t, TensorShape); +REGISTER_OPAQUE(flexflow_parallel_tesor_shape_list_t, std::vector) +REGISTER_OPAQUE(flexflow_tensor_shape_list_t, std::vector) //ops -REGISTER_OPAQUE(flexflow_aggregae_specattrs_t, AggregateSpecAttrs); -REGISTER_OPAQUE(flexflow_aggregate_t, Aggregate); -REGISTER_OPAQUE(flexflow_multihead_attentionattrs_t, MultiHeadAttentionAttrs); -REGISTER_OPAQUE(flexflow_multihead_attentioninputs_t, ultiHeadAttentionInputs); +REGISTER_OPAQUE(flexflow_aggregate_specattrs_t, AggregateSpecAttrs); +REGISTER_OPAQUE(flexflow_aggregate_attrs_t, AggregateAttrs); +REGISTER_OPAQUE(flexflow_multihead_attention_attrs_t, MultiHeadAttentionAttrs); +REGISTER_OPAQUE(flexflow_multihead_attention_inputs_t, MultiHeadAttentionInputs); REGISTER_OPAQUE(flexflow_batchmatmul_attrs_t, BatchMatmulAttrs); REGISTER_OPAQUE(flexflow_batchnorm_attrs_t, BatchNormAttrs); REGISTER_OPAQUE(flexflow_broadcast_attrs_t, BroadcastAttrs); @@ -59,6 +97,31 @@ REGISTER_OPAQUE(flexflow_softmax_attrs_t, SoftmaxAttrs); REGISTER_OPAQUE(flexflow_split_attrs_t, SplitAttrs); REGISTER_OPAQUE(flexflow_topk_attrs_t, TopKAttrs); +flexflow_error_t flexflow_get_output_shape(flexflow_aggregate_specattrs_t, flexflow_parallel_tensor_shape_t, flexflow_parallel_tensor_shape_t *out, flexflow_parallel_tensor_shape_t, flexflow_parallel_tensor_shape_t, flexflow_parallel_tensor_shape_t, flexflow_parallel_tensor_shape_list_t); +flexflow_error_t flexflow_get_output_shape(flexflow_aggregate_attrs_t, flexflow_parallel_tensor_shape_t, 
+flexflow_error_t flexflow_get_output_shape(flexflow_aggregate_specattrs_t, flexflow_parallel_tensor_shape_t, flexflow_parallel_tensor_shape_t *out, flexflow_parallel_tensor_shape_t, flexflow_parallel_tensor_shape_t, flexflow_parallel_tensor_shape_t, flexflow_parallel_tensor_shape_list_t);
+flexflow_error_t flexflow_get_output_shape(flexflow_aggregate_attrs_t, flexflow_parallel_tensor_shape_t, flexflow_parallel_tensor_shape_t *out, flexflow_parallel_tensor_shape_t, flexflow_parallel_tensor_shape_t, flexflow_parallel_tensor_shape_t, flexflow_parallel_tensor_shape_list_t);
+//TODO(Note(lambda)): how to support op-attrs/include/op-attrs/ops/attention.h? how to define MultiHeadAttentionInputs?
+flexflow_error_t flexflow_get_output_shape(flexflow_batchnorm_attrs_t, flexflow_parallel_tensor_shape_t * out);//ParallelTensorShape get_output_shape(BatchNormAttrs const &);
+flexflow_error_t flexflow_get_kernel_shape(flexflow_conv2d_attrs_t, flexflow_tensor_shape_t * out, flexflow_tensor_shape_t);//TensorShape get_kernel_shape(Conv2DAttrs const &, TensorShape const &);
+flexflow_error_t flexflow_get_bias_shape(flexflow_conv2d_attrs_t, flexflow_tensor_shape_t * out, flexflow_tensor_shape_t);//TensorShape get_bias_shape(Conv2DAttrs const &, TensorShape const &);
+flexflow_error_t flexflow_get_weights_shape(flexflow_embedding_attrs_t, flexflow_tensor_shape_t * out, flexflow_tensor_shape);//TensorShape get_weights_shape(EmbeddingAttrs const &, TensorShape const &);
+//how to represent LossFunction parse_loss_function_name(std::string const &)? because its parameter is a std::string
+flexflow_error_t flexflow_get_loss_function(flexflow_other_loss_attrs_t, flexflow_loss_function_t * out);//LossFunction get_loss_function(OtherLossAttrs const &);
+flexflow_error_t flexflow_get_loss_function(flexflow_sparse_categorical_crossentropy_loss_attrs_t, flexflow_loss_function_t * out);//LossFunction get_loss_function(SparseCategoricalCrossEntropyLossAttrs const &);
+//flexflow_error_t flexflow_get_loss_function(flexflow_loss_)
+//TODO(Note lambda): how to define inner_to_outer_idxs, outer_to_inner_idxs, outer_to_inner (op-attrs/include/op-attrs/dim_ordered.h)
+/*
+how to define a function whose 1) parameter is bool or std::string, or 2) return type is bool, std::string, or int?
+bool is_parallel_op(PCGOperatorAttrs const &);
+bool is_valid(PCGOperatorAttrs const &,
+              std::vector<ParallelTensorShape> const &);
+*/
+flexflow_error_t flexflow_get_piece_dims(flexflow_parallel_tensor_dims_t, flexflow_tensor_dims_t * out);//TensorDims get_piece_dims(ParallelTensorDims const &);
+flexflow_error_t flexflow_get_tensor_dims_unsafe(flexflow_parallel_tensor_dims_t, flexflow_tensor_dims_t * out);//TensorDims get_tensor_dims_unsafe(ParallelTensorDims const &);
+flexflow_error_t flexflow_get_piece_shape(flexflow_parallel_tensor_shape_t, flexflow_tensor_shape_t * out);//TensorShape get_piece_shape(ParallelTensorShape const &);
+flexflow_error_t flexflow_get_tensor_shape_unsafe(flexflow_parallel_tensor_shape_t, flexflow_tensor_shape_t * out);//TensorShape get_tensor_shape_unsafe(ParallelTensorShape const &);
+flexflow_error_t flexflow_get_tensor_shape_unsafe(flexflow_parallel_tensor_shape_list_t, flexflow_tensor_shape_list_t *out);//std::vector<TensorShape> get_tensor_shape_unsafe(std::vector<ParallelTensorShape> const &);
+
 optional<ParamSync> to_internal(flexflow_param_sync_t);
 flexflow_param_sync_t to_external(optional<ParamSync>);

From 4831ead0d4823a89f9f63646c98d485d1961a95c Mon Sep 17 00:00:00 2001
From: lambda7xx
Date: Sun, 20 Aug 2023 01:34:10 +0000
Subject: [PATCH 06/13] format the code

---
 lib/op-attrs/ffi/include/flexflow/op-attrs.h  |  14 +-
 lib/op-attrs/ffi/internal/internal/op-attrs.h | 120 +++++++++++++-----
 lib/op-attrs/ffi/src/op-attrs.cc              |  23 ++--
 3 files changed, 110 insertions(+), 47 deletions(-)

diff --git a/lib/op-attrs/ffi/include/flexflow/op-attrs.h b/lib/op-attrs/ffi/include/flexflow/op-attrs.h
index a36171c770..b871633fc1 100644
--- a/lib/op-attrs/ffi/include/flexflow/op-attrs.h
+++ b/lib/op-attrs/ffi/include/flexflow/op-attrs.h
@@ -23,8 +23,9 @@ flexflow_error_t flexflow_opattrs_error_unwrap(flexflow_error_t,
 flexflow_error_t flexflow_opattrs_error_is_ok(flexflow_opattrs_error_t, bool *);
 flexflow_error_t flexflow_opattrs_error_get_string(flexflow_opattrs_error_t,
                                                    char **);
-flexflow_error_t flexflow_opattrs_error_get_error_code(flexflow_opattrs_error_t,
-                                                       flexflow_opattrs_error_code_t *);
+flexflow_error_t
+    flexflow_opattrs_error_get_error_code(flexflow_opattrs_error_t,
+                                          flexflow_opattrs_error_code_t *);
 flexflow_error_t flexflow_opattrs_error_destroy(flexflow_opattrs_error_t);

 //
@@ -33,13 +34,14 @@ FF_NEW_OPAQUE_TYPE(flexflow_regularizer_attrs_t);
 FF_NEW_OPAQUE_TYPE(flexflow_dim_ordered_t);
 FF_NEW_OPAQUE_TYPE(flexflow_ff_dim_t);
 FF_NEW_OPAQUE_TYPE(flexflow_parallel_dim_t);
-FF_NEW_OPAQUE_TYPE(flexflow_parallel_tensor_dims_t); 
+FF_NEW_OPAQUE_TYPE(flexflow_parallel_tensor_dims_t);
 FF_NEW_OPAQUE_TYPE(flexflow_parallel_tensor_shape_t);
 FF_NEW_OPAQUE_TYPE(flexflow_tensor_shape_t);
-FF_NEW_OPAQUE_TYPE(flexflow_parallel_tensor_shape_list_t);//std::vector<ParallelTensorShape>
-FF_NEW_OPAQUE_TYPE(flexflow_tensor_shape_list_t);//std::vector<TensorShape>
+FF_NEW_OPAQUE_TYPE(
+    flexflow_parallel_tensor_shape_list_t); // std::vector<ParallelTensorShape>
+FF_NEW_OPAQUE_TYPE(flexflow_tensor_shape_list_t); // std::vector<TensorShape>

-//ops
+// ops
 FF_NEW_OPAQUE_TYPE(flexflow_aggregate_specattrs_t);
 FF_NEW_OPAQUE_TYPE(flexflow_aggregate_attrs_t);
 FF_NEW_OPAQUE_TYPE(flexflow_multihead_attention_attrs_t);
diff --git a/lib/op-attrs/ffi/internal/internal/op-attrs.h b/lib/op-attrs/ffi/internal/internal/op-attrs.h
index 142c57604c..43a24ae5a2 100644
--- a/lib/op-attrs/ffi/internal/internal/op-attrs.h
+++ b/lib/op-attrs/ffi/internal/internal/op-attrs.h
@@ -6,10 +6,11 @@
 #include "internal/opaque.h"
 #include "op-attrs/activation.h"
 #include "op-attrs/datatype.h"
+#include "op-attrs/dim_ordered.h"
+#include "op-attrs/ff_dim.h"
 #include "op-attrs/op.h"
-#include "op-attrs/ops/embedding.h"
-#include "op-attrs/ops/aggregate_spec.h"
 #include "op-attrs/ops/aggregate.h"
+#include "op-attrs/ops/aggregate_spec.h"
 #include "op-attrs/ops/attention.h"
 #include "op-attrs/ops/batch_matmul.h"
 #include "op-attrs/ops/batch_norm.h"
@@ -21,6 +22,7 @@
 #include "op-attrs/ops/dropout.h"
 #include "op-attrs/ops/element_binary.h"
 #include "op-attrs/ops/element_unary.h"
+#include "op-attrs/ops/embedding.h"
 #include "op-attrs/ops/flat.h"
 #include "op-attrs/ops/gather.h"
 #include "op-attrs/ops/group_by.h"
@@ -40,30 +42,30 @@
 #include "op-attrs/ops/split.h"
 #include "op-attrs/ops/topk.h"
 #include "op-attrs/ops/transpose.h"
-#include "op-attrs/param_sync.h"
-#include "op-attrs/dim_ordered.h"
-#include "op-attrs/ff_dim.h"
 #include "op-attrs/parallel_dim.h"
-#include "op-attrs/parallel_tensor_shape.h"
 #include "op-attrs/parallel_tensor_dims.h"
+#include "op-attrs/parallel_tensor_shape.h"
+#include "op-attrs/param_sync.h"

 using namespace FlexFlow;

 REGISTER_OPAQUE(flexflow_regularizer_attrs_t, optional<RegularizerAttrs>);
 REGISTER_OPAQUE(flexflow_ff_dim_t, ff_dim_t);
-//REGISTER_OPAQUE(flexflow_dim_ordered_t, DimOrdered);
+// REGISTER_OPAQUE(flexflow_dim_ordered_t, DimOrdered);
 REGISTER_OPAQUE(flexflow_parallel_dim_t, ParallelDim);
 REGISTER_OPAQUE(flexflow_parallel_tensor_dims_t, ParallelTensorDims);
 REGISTER_OPAQUE(flexflow_parallel_tensor_shape_t, ParallelTensorShape);
 REGISTER_OPAQUE(flexflow_tensor_shape_t, TensorShape);
-REGISTER_OPAQUE(flexflow_parallel_tensor_shape_list_t, std::vector<ParallelTensorShape>)
+REGISTER_OPAQUE(flexflow_parallel_tensor_shape_list_t,
+                std::vector<ParallelTensorShape>)
 REGISTER_OPAQUE(flexflow_tensor_shape_list_t, std::vector<TensorShape>)

-//ops
+// ops
 REGISTER_OPAQUE(flexflow_aggregate_specattrs_t, AggregateSpecAttrs);
 REGISTER_OPAQUE(flexflow_aggregate_attrs_t, AggregateAttrs);
 REGISTER_OPAQUE(flexflow_multihead_attention_attrs_t, MultiHeadAttentionAttrs);
-REGISTER_OPAQUE(flexflow_multihead_attention_inputs_t, MultiHeadAttentionInputs);
+REGISTER_OPAQUE(flexflow_multihead_attention_inputs_t,
+                MultiHeadAttentionInputs);
 REGISTER_OPAQUE(flexflow_batchmatmul_attrs_t, BatchMatmulAttrs);
 REGISTER_OPAQUE(flexflow_batchnorm_attrs_t, BatchNormAttrs);
 REGISTER_OPAQUE(flexflow_broadcast_attrs_t, BroadcastAttrs);
@@ -83,7 +85,8 @@ REGISTER_OPAQUE(flexflow_layernorm_attrs_t, LayerNormAttrs);
 REGISTER_OPAQUE(flexflow_l1_regularizer_attrs_t, L1RegularizerAttrs);
 REGISTER_OPAQUE(flexflow_l2_regularizer_attrs_t, L2RegularizerAttrs);
 REGISTER_OPAQUE(flexflow_linear_attrs_t, LinearAttrs);
-REGISTER_OPAQUE(flexflow_sparse_categorical_crossentropy_loss_attrs_t, SparseCategoricalCrossEntropyLossAttrs);
+REGISTER_OPAQUE(flexflow_sparse_categorical_crossentropy_loss_attrs_t,
+                SparseCategoricalCrossEntropyLossAttrs);
 REGISTER_OPAQUE(flexflow_other_loss_attrs_t, OtherLossAttrs);
 REGISTER_OPAQUE(flexflow_noop_attrs_t, NoopAttrs);
 REGISTER_OPAQUE(flexflow_pool2d_attrs_t, Pool2DAttrs);
@@ -97,30 +100,85 @@ REGISTER_OPAQUE(flexflow_softmax_attrs_t, SoftmaxAttrs);
 REGISTER_OPAQUE(flexflow_split_attrs_t, SplitAttrs);
 REGISTER_OPAQUE(flexflow_topk_attrs_t, TopKAttrs);

-flexflow_error_t flexflow_get_output_shape(flexflow_aggregate_specattrs_t, flexflow_parallel_tensor_shape_t, flexflow_parallel_tensor_shape_t *out, flexflow_parallel_tensor_shape_t, flexflow_parallel_tensor_shape_t, flexflow_parallel_tensor_shape_t, flexflow_parallel_tensor_shape_list_t);
-flexflow_error_t flexflow_get_output_shape(flexflow_aggregate_attrs_t, flexflow_parallel_tensor_shape_t, flexflow_parallel_tensor_shape_t *out, flexflow_parallel_tensor_shape_t, flexflow_parallel_tensor_shape_t, flexflow_parallel_tensor_shape_t, flexflow_parallel_tensor_shape_list_t);
-//TODO(Note(lambda)): how to support op-attrs/include/op-attrs/ops/attention.h? how to define MultiHeadAttentionInputs?
-flexflow_error_t flexflow_get_output_shape(flexflow_batchnorm_attrs_t, flexflow_parallel_tensor_shape_t * out);//ParallelTensorShape get_output_shape(BatchNormAttrs const &);
-flexflow_error_t flexflow_get_kernel_shape(flexflow_conv2d_attrs_t, flexflow_tensor_shape_t * out, flexflow_tensor_shape_t);//TensorShape get_kernel_shape(Conv2DAttrs const &, TensorShape const &);
-flexflow_error_t flexflow_get_bias_shape(flexflow_conv2d_attrs_t, flexflow_tensor_shape_t * out, flexflow_tensor_shape_t);//TensorShape get_bias_shape(Conv2DAttrs const &, TensorShape const &);
-flexflow_error_t flexflow_get_weights_shape(flexflow_embedding_attrs_t, flexflow_tensor_shape_t * out, flexflow_tensor_shape);//TensorShape get_weights_shape(EmbeddingAttrs const &, TensorShape const &);
-//how to represent LossFunction parse_loss_function_name(std::string const &)? because its parameter is a std::string
-flexflow_error_t flexflow_get_loss_function(flexflow_other_loss_attrs_t, flexflow_loss_function_t * out);//LossFunction get_loss_function(OtherLossAttrs const &);
-flexflow_error_t flexflow_get_loss_function(flexflow_sparse_categorical_crossentropy_loss_attrs_t, flexflow_loss_function_t * out);//LossFunction get_loss_function(SparseCategoricalCrossEntropyLossAttrs const &);
-//flexflow_error_t flexflow_get_loss_function(flexflow_loss_)
-//TODO(Note lambda): how to define inner_to_outer_idxs, outer_to_inner_idxs, outer_to_inner (op-attrs/include/op-attrs/dim_ordered.h)
 /*
-how to define a function whose 1) parameter is bool or std::string, or 2) return type is bool, std::string, or int?
-bool is_parallel_op(PCGOperatorAttrs const &);
+how to define a function whose 1) parameter is bool or std::string, or 2)
+return type is bool, std::string, or int? bool is_parallel_op(PCGOperatorAttrs const &);
 bool is_valid(PCGOperatorAttrs const &,
               std::vector<ParallelTensorShape> const &);
 */
-flexflow_error_t flexflow_get_piece_dims(flexflow_parallel_tensor_dims_t, flexflow_tensor_dims_t * out);//TensorDims get_piece_dims(ParallelTensorDims const &);
-flexflow_error_t flexflow_get_tensor_dims_unsafe(flexflow_parallel_tensor_dims_t, flexflow_tensor_dims_t * out);//TensorDims get_tensor_dims_unsafe(ParallelTensorDims const &);
-flexflow_error_t flexflow_get_piece_shape(flexflow_parallel_tensor_shape_t, flexflow_tensor_shape_t * out);//TensorShape get_piece_shape(ParallelTensorShape const &);
-flexflow_error_t flexflow_get_tensor_shape_unsafe(flexflow_parallel_tensor_shape_t, flexflow_tensor_shape_t * out);//TensorShape get_tensor_shape_unsafe(ParallelTensorShape const &);
-flexflow_error_t flexflow_get_tensor_shape_unsafe(flexflow_parallel_tensor_shape_list_t, flexflow_tensor_shape_list_t *out);//std::vector<TensorShape> get_tensor_shape_unsafe(std::vector<ParallelTensorShape> const &);
+flexflow_error_t
+    flexflow_get_output_shape(flexflow_aggregate_specattrs_t,
+                              flexflow_parallel_tensor_shape_t,
+                              flexflow_parallel_tensor_shape_t *out,
+                              flexflow_parallel_tensor_shape_t,
+                              flexflow_parallel_tensor_shape_t,
+                              flexflow_parallel_tensor_shape_t,
+                              flexflow_parallel_tensor_shape_list_t);
+flexflow_error_t
+    flexflow_get_output_shape(flexflow_aggregate_attrs_t,
+                              flexflow_parallel_tensor_shape_t,
+                              flexflow_parallel_tensor_shape_t *out,
+                              flexflow_parallel_tensor_shape_t,
+                              flexflow_parallel_tensor_shape_t,
+                              flexflow_parallel_tensor_shape_t,
+                              flexflow_parallel_tensor_shape_list_t);
+// TODO(Note(lambda)): how to support op-attrs/include/op-attrs/ops/attention.h?
+// how to define MultiHeadAttentionInputs?
+flexflow_error_t flexflow_get_output_shape(
+    flexflow_batchnorm_attrs_t,
+    flexflow_parallel_tensor_shape_t
+        *out); // ParallelTensorShape get_output_shape(BatchNormAttrs const &);
+flexflow_error_t flexflow_get_kernel_shape(
+    flexflow_conv2d_attrs_t,
+    flexflow_tensor_shape_t *out,
+    flexflow_tensor_shape_t); // TensorShape get_kernel_shape(Conv2DAttrs const
+                              // &, TensorShape const &);
+flexflow_error_t flexflow_get_bias_shape(
+    flexflow_conv2d_attrs_t,
+    flexflow_tensor_shape_t *out,
+    flexflow_tensor_shape_t); // TensorShape get_bias_shape(Conv2DAttrs const &,
+                              // TensorShape const &);
+flexflow_error_t flexflow_get_weights_shape(
+    flexflow_embedding_attrs_t,
+    flexflow_tensor_shape_t *out,
+    flexflow_tensor_shape); // TensorShape get_weights_shape(EmbeddingAttrs
+                            // const &, TensorShape const &);
+// how to represent LossFunction parse_loss_function_name(std::string const &)?
+// because its parameter is a std::string
+flexflow_error_t flexflow_get_loss_function(
+    flexflow_other_loss_attrs_t,
+    flexflow_loss_function_t
+        *out); // LossFunction get_loss_function(OtherLossAttrs const &);
+flexflow_error_t flexflow_get_loss_function(
+    flexflow_sparse_categorical_crossentropy_loss_attrs_t,
+    flexflow_loss_function_t *out); // LossFunction
+                                    // get_loss_function(SparseCategoricalCrossEntropyLossAttrs
+                                    // const &);
+// flexflow_error_t flexflow_get_loss_function(flexflow_loss_)
+// TODO(Note lambda): how to define inner_to_outer_idxs, outer_to_inner_idxs,
+// outer_to_inner (op-attrs/include/op-attrs/dim_ordered.h)
+flexflow_error_t flexflow_get_piece_dims(
+    flexflow_parallel_tensor_dims_t,
+    flexflow_tensor_dims_t
+        *out); // TensorDims get_piece_dims(ParallelTensorDims const &);
+flexflow_error_t flexflow_get_tensor_dims_unsafe(
+    flexflow_parallel_tensor_dims_t,
+    flexflow_tensor_dims_t
+        *out); // TensorDims get_tensor_dims_unsafe(ParallelTensorDims const &);
+flexflow_error_t flexflow_get_piece_shape(
+    flexflow_parallel_tensor_shape_t,
+    flexflow_tensor_shape_t
+        *out); // TensorShape get_piece_shape(ParallelTensorShape const &);
+flexflow_error_t flexflow_get_tensor_shape_unsafe(
+    flexflow_parallel_tensor_shape_t,
+    flexflow_tensor_shape_t
+        *out); // TensorShape get_tensor_shape_unsafe(ParallelTensorShape const
+               // &);
+flexflow_error_t flexflow_get_tensor_shape_unsafe(
+    flexflow_parallel_tensor_shape_list_t,
+    flexflow_tensor_shape_list_t
+        *out); // std::vector<TensorShape>
+               // get_tensor_shape_unsafe(std::vector<ParallelTensorShape>
+               // const &);

 optional<ParamSync> to_internal(flexflow_param_sync_t);
 flexflow_param_sync_t to_external(optional<ParamSync>);
diff --git a/lib/op-attrs/ffi/src/op-attrs.cc b/lib/op-attrs/ffi/src/op-attrs.cc
index a393e61ccf..8796118a99 100644
--- a/lib/op-attrs/ffi/src/op-attrs.cc
+++ b/lib/op-attrs/ffi/src/op-attrs.cc
@@ -14,8 +14,8 @@ flexflow_error_t flexflow_opattrs_error_wrap(flexflow_opattrs_error_t e) {
   return flexflow_error_wrap(FLEXFLOW_ERROR_SOURCE_OPATTRS, *unwrap_opaque(e));
 }

-flexflow_error_t flexflow_opattrs_error_unwrap(
-    flexflow_error_t err ,flexflow_opattrs_error_t *out) {
+flexflow_error_t flexflow_opattrs_error_unwrap(flexflow_error_t err,
+                                               flexflow_opattrs_error_t *out) {
   return flexflow_error_unwrap(err, FLEXFLOW_ERROR_SOURCE_OPATTRS, out);
 }

@@ -29,7 +29,7 @@ flexflow_error_t flexflow_opattrs_error_get_string(flexflow_opattrs_error_t err,
                                                    char **m_out) {
   flexflow_opattrs_error_code_t err_code;
   flexflow_opattrs_error_get_error_code(err, &err_code);
-  auto out = const_cast<char**>(m_out);
+  auto out = const_cast<char **>(m_out);
   switch (err_code) {
     case FLEXFLOW_OPATTRS_ERROR_CODE_INVALID_PARAM_SYNC_VALUE:
       *out = strdup("Invalid param sync value");
@@ -55,17 +55,20 @@ flexflow_error_t flexflow_opattrs_error_get_string(flexflow_opattrs_error_t err,
   return status_ok();
 }

-flexflow_error_t flexflow_opattrs_error_get_error_code(flexflow_opattrs_error_t err,
-    flexflow_opattrs_error_code_t * out) {
-  flexflow_opattrs_error_t opaque;
-  RAISE_FLEXFLOW(flexflow_opattrs_error_unwrap(err, &opaque));
-  internal_flexflow_opattrs_error_t const *unwrapped = unwrap_opaque(opaque);
+flexflow_error_t
+    flexflow_opattrs_error_get_error_code(flexflow_opattrs_error_t err,
+                                          flexflow_opattrs_error_code_t *out) {
+  flexflow_opattrs_error_t opaque;
+  RAISE_FLEXFLOW(flexflow_opattrs_error_unwrap(err, &opaque));
+  internal_flexflow_opattrs_error_t const *unwrapped = unwrap_opaque(opaque);
   *out = unwrapped->code;
   return status_ok();
 }

-flexflow_error_t flexflow_opattrs_error_destroy(flexflow_opattrs_error_t err){
-  return status_ok();// Note(lambda): this follows https://github.com/lockshaw/FlexFlow/blob/expanded-ffi/lib/pcg/ffi/src/pcg.cc#L71-#L72 // return flexflow_error_destroy(err);
+flexflow_error_t flexflow_opattrs_error_destroy(flexflow_opattrs_error_t err) {
+  return status_ok(); // Note(lambda): this follows
+                      // https://github.com/lockshaw/FlexFlow/blob/expanded-ffi/lib/pcg/ffi/src/pcg.cc#L71-#L72
+                      // // return flexflow_error_destroy(err);
 }

From 63e29731ed93791f75debc19f6e91e7172e3843a Mon Sep 17 00:00:00 2001
From: lambda7xx
Date: Sun, 20 Aug 2023 09:15:22 +0000
Subject: [PATCH 07/13] add all necessary methods in
 lib/op-attrs/ffi/internal/internal/op-attrs.h

---
 lib/op-attrs/ffi/include/flexflow/op-attrs.h  |  15 +-
 lib/op-attrs/ffi/internal/internal/op-attrs.h | 131 +++++++++++++++---
 lib/op-attrs/ffi/src/op-attrs.cc              |  15 +-
 3 files changed, 137 insertions(+), 24 deletions(-)
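
Note: REGISTER_FFI_ENUM is assumed to generate the same conversion hooks that
the hand-written ParamSync declarations above already rely on, i.e. roughly

    optional<LossFunction> to_internal(flexflow_loss_function_t);
    flexflow_loss_function_t to_external(optional<LossFunction>);

so that an out-of-range flexflow_loss_function_t value surfaces as
FLEXFLOW_OPATTRS_ERROR_CODE_INVALID_LOSS_FUNCTION_VALUE instead of undefined
behavior.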
FF_NEW_OPAQUE_TYPE(flexflow_l2_regularizer_attrs_t); FF_NEW_OPAQUE_TYPE(flexflow_linear_attrs_t); FF_NEW_OPAQUE_TYPE(flexflow_sparse_categorical_crossentropy_loss_attrs_t); FF_NEW_OPAQUE_TYPE(flexflow_other_loss_attrs_t); +FF_NEW_OPAQUE_TYPE(flexflow_loss_attrs_t); FF_NEW_OPAQUE_TYPE(flexflow_noop_attrs_t); FF_NEW_OPAQUE_TYPE(flexflow_pool2d_attrs_t); FF_NEW_OPAQUE_TYPE(flexflow_reduce_attrs_t); @@ -113,6 +116,14 @@ typedef enum { FLEXFLOW_AGGREGATE_OP_AVG, } flexflow_aggregate_op_t; +typedef enum { + FLEXFLOW_LOSS_FUNCTION_CATEGORICAL_CROSSENTROPY, + FLEXFLOW_LOSS_FUNCTION_SPARSE_CATEGORICAL_CROSSENTROPY, + FLEXFLOW_LOSS_FUNCTION_MEAN_SQUARED_ERROR_AVG_REDUCE, + FLEXFLOW_LOSS_FUNCTION_MEAN_SQUARED_ERROR_SUM_REDUCE, + FLEXFLOW_LOSS_FUNCTION_IDENTITY, +} flexflow_loss_function_t; + typedef enum { // does _not_ have to stay synchronized with op-attrs/op.h FLEXFLOW_OP_TYPE_NOOP, FLEXFLOW_OP_TYPE_INPUT, diff --git a/lib/op-attrs/ffi/internal/internal/op-attrs.h b/lib/op-attrs/ffi/internal/internal/op-attrs.h index 43a24ae5a2..d5b08a2a3f 100644 --- a/lib/op-attrs/ffi/internal/internal/op-attrs.h +++ b/lib/op-attrs/ffi/internal/internal/op-attrs.h @@ -64,8 +64,10 @@ REGISTER_OPAQUE(flexflow_tensor_shape_list_t, std::vector) REGISTER_OPAQUE(flexflow_aggregate_specattrs_t, AggregateSpecAttrs); REGISTER_OPAQUE(flexflow_aggregate_attrs_t, AggregateAttrs); REGISTER_OPAQUE(flexflow_multihead_attention_attrs_t, MultiHeadAttentionAttrs); -REGISTER_OPAQUE(flexflow_multihead_attention_inputs_t, - MultiHeadAttentionInputs); +REGISTER_OPAQUE(flexflow_multihead_attention_inputs_parallel_tensor_shape_t, + MultiHeadAttentionInputs); +REGISTER_OPAQUE(flexflow_multihead_attention_inputs_tensor_shape_t, + MultiHeadAttentionInputs); REGISTER_OPAQUE(flexflow_batchmatmul_attrs_t, BatchMatmulAttrs); REGISTER_OPAQUE(flexflow_batchnorm_attrs_t, BatchNormAttrs); REGISTER_OPAQUE(flexflow_broadcast_attrs_t, BroadcastAttrs); @@ -88,6 +90,7 @@ REGISTER_OPAQUE(flexflow_linear_attrs_t, LinearAttrs); REGISTER_OPAQUE(flexflow_sparse_categorical_crossentropy_loss_attrs_t, SparseCategoricalCrossEntropyLossAttrs); REGISTER_OPAQUE(flexflow_other_loss_attrs_t, OtherLossAttrs); +REGISTER_OPAQUE(flexflow_loss_attrs_t, LossAttrs); REGISTER_OPAQUE(flexflow_noop_attrs_t, NoopAttrs); REGISTER_OPAQUE(flexflow_pool2d_attrs_t, Pool2DAttrs); REGISTER_OPAQUE(flexflow_reduce_attrs_t, ReduceAttrs); @@ -108,6 +111,61 @@ flexflow_error_t flexflow_parallel_tensor_shape_t, flexflow_parallel_tensor_shape_t, flexflow_parallel_tensor_shape_list_t); +flexflow_error_t flexflow_is_valid(flexflow_aggregate_attrs_t, + flexflow_parallel_tensor_shape_t, + bool *out flexflow_parallel_tensor_shape_t, + flexflow_parallel_tensor_shape_t, + flexflow_parallel_tensor_shape_t, + flexflow_parallel_tensor_shape_list_t); + +flexflow_error_t flexflow_get_kProjSize(flexflow_multihead_attention_attrs_t, + int *out); +flexflow_error_t flexflow_get_vProjSize(flexflow_multihead_attention_attrs_t, + int *out); +flexflow_error_t flexflow_get_kProjSize(flexflow_multihead_attention_attrs_t, + int *out); +flexflow_error_t flexflow_get_oProjSize(flexflow_multihead_attention_attrs_t, + int *out); + +flexflow_error_t flexflow_get_qSize( + flexflow_multihead_attention_inputs_parallel_tensor_shape_t, int *out); +flexflow_error_t flexflow_get_kSize( + flexflow_multihead_attention_inputs_parallel_tensor_shape_t, int *out); +flexflow_error_t flexflow_get_vSize( + flexflow_multihead_attention_inputs_parallel_tensor_shape_t, int *out); +flexflow_error_t flexflow_get_oSize( + 
+
+flexflow_error_t flexflow_get_qoSeqLength(
+    flexflow_multihead_attention_inputs_parallel_tensor_shape_t, int *out);
+flexflow_error_t flexflow_get_kvSeqLength(
+    flexflow_multihead_attention_inputs_parallel_tensor_shape_t, int *out);
+flexflow_error_t flexflow_get_kvSeqLength(
+    flexflow_multihead_attention_inputs_parallel_tensor_shape_t, int *out);
+
+flexflow_error_t flexflow_get_num_samples(
+    flexflow_multihead_attention_inputs_parallel_tensor_shape_t, int *out);
+
+flexflow_error_t flexflow_get_weights_shape(
+    flexflow_multihead_attention_attrs_t,
+    flexflow_tensor_shape_t *out
+        flexflow_multihead_attention_inputs_tensor_shape_t);
+
+flexflow_error_t flexflow_get_weights_shape(
+    flexflow_multihead_attention_attrs_t,
+    flexflow_parallel_tensor_shape_t *out
+        flexflow_multihead_attention_inputs_parallel_tensor_shape_t);
+
+flexflow_error_t flexflow_get_output_shape(
+    flexflow_multihead_attention_attrs_t,
+    flexflow_parallel_tensor_shape_t *out,
+    flexflow_multihead_attention_inputs_tensor_shape_t);
+
+flexflow_error_t flexflow_get_output_shape(
+    flexflow_multihead_attention_attrs_t,
+    flexflow_tensor_shape_t *out,
+    flexflow_multihead_attention_inputs_tensor_shape_t);

 flexflow_error_t
     flexflow_get_output_shape(flexflow_aggregate_attrs_t,
                               flexflow_parallel_tensor_shape_t,
                               flexflow_parallel_tensor_shape_t *out,
                               flexflow_parallel_tensor_shape_t,
                               flexflow_parallel_tensor_shape_t,
                               flexflow_parallel_tensor_shape_t,
                               flexflow_parallel_tensor_shape_list_t);
-// TODO(Note(lambda)): how to support op-attrs/include/op-attrs/ops/attention.h?
-// how to define MultiHeadAttentionInputs?

 flexflow_error_t flexflow_get_output_shape(
     flexflow_batchnorm_attrs_t,
     flexflow_parallel_tensor_shape_t
         *out); // ParallelTensorShape get_output_shape(BatchNormAttrs const &);

 flexflow_error_t flexflow_get_kernel_shape(
     flexflow_conv2d_attrs_t,
     flexflow_tensor_shape_t *out,
     flexflow_tensor_shape_t); // TensorShape get_kernel_shape(Conv2DAttrs const
                               // &, TensorShape const &);

 flexflow_error_t flexflow_get_bias_shape(
     flexflow_conv2d_attrs_t,
     flexflow_tensor_shape_t *out,
     flexflow_tensor_shape_t); // TensorShape get_bias_shape(Conv2DAttrs const &,
                               // TensorShape const &);

 flexflow_error_t flexflow_get_weights_shape(
     flexflow_embedding_attrs_t,
     flexflow_tensor_shape_t *out,
     flexflow_tensor_shape); // TensorShape get_weights_shape(EmbeddingAttrs
                             // const &, TensorShape const &);
because its parameter is std::string + +// has some problem on loss_function.h +// how to define LossFunction in loss_function.h +flexflow_error_t flexflow_parse_loss_function_name( + char **, + flexflow_loss_function_t + *out); // LossFunction parse_loss_function_name(std::string const &); + flexflow_error_t flexflow_get_loss_function( flexflow_other_loss_attrs_t, flexflow_loss_function_t *out); // LossFunction get_loss_function(OtherLossAttrs const &); + flexflow_error_t flexflow_get_loss_function( flexflow_sparse_categorical_crossentropy_loss_attrs_t, - flexflow_loss_function_t *out); // LossFunction - // get_loss_function(SparseCategoricalCrossEntropyLossAttrs - // const &); -// flexflow_error_t flexflow_get_loss_function(flexflow_loss_) -// TODO(Note lambda):how to define nner_to_outer_idxs, outer_to_inner_idxs, -// outer_to_inner(op-attrs/include/op-attrs/dim_ordered.h) -/* -how to define the function 1)its parameter is bool, std::string, or 2)its return -type is bool, std::string,int bool is_parallel_op(PCGOperatorAttrs const &); -bool is_valid(PCGOperatorAttrs const &, - std::vector const &); -*/ + flexflow_loss_function_t + *out); // LossFunction + // get_loss_function(SparseCategoricalCrossEntropyLossAttrs + // const &); + +flexflow_error_t flexflow_get_loss_function( + flexflow_loss_attrs_t, + flexflow_loss_function_t + *out); // LossFunction get_loss_function(LossAttrs const &); + +// TODO(Note lambda):how to define nner_to_outer_idxs, outer_to_inner_idxs,how +// to define DimOrdered outer_to_inner(op-attrs/include/op-attrs/dim_ordered.h) + +// Note(lambda): have to define all +// get_output_shape(op-attrs/include/op-attrs/get_output_shapes.h)? +flexflow_error_t + flexflow_is_valid(flexflow_parallel_tensor_dims_t, + bool *out); // bool is_valid(ParallelTensorDims const &); + flexflow_error_t flexflow_get_piece_dims( flexflow_parallel_tensor_dims_t, flexflow_tensor_dims_t *out); // TensorDims get_piece_dims(ParallelTensorDims const &); + flexflow_error_t flexflow_get_tensor_dims_unsafe( flexflow_parallel_tensor_dims_t, flexflow_tensor_dims_t *out); // TensorDims get_tensor_dims_unsafe(ParallelTensorDims const &); + flexflow_error_t flexflow_get_piece_shape( flexflow_parallel_tensor_shape_t, flexflow_tensor_shape_t *out); // TensorShape get_piece_shape(ParallelTensorShape const &); + +flexflow_error_t flexflow_get_num_replica_dims( + flexflolw_parallel_tensor_shape_t, + int *out); // int get_num_replica_dims(ParallelTensorShape const &); +flexflow_error_t flexflow_get_num_replicas( + flexflow_parallel_tensor_shape_t, + int *out); // int get_num_replicas(ParallelTensorShape const &); +flexflow_error_t + flexflow_is_valid(flexflow_parallel_tensor_shape_t, + bool *out); // bool is_valid(ParallelTensorShape const &); + flexflow_error_t flexflow_get_tensor_shape_unsafe( flexflow_parallel_tensor_shape_t, flexflow_tensor_shape_t *out); // TensorShape get_tensor_shape_unsafe(ParallelTensorShape const // &); + flexflow_error_t flexflow_get_tensor_shape_unsafe( flexflow_parallel_tesor_shape_list_t, - flexflow_tensor_shape_list_t *out); // std::vector - // get_tensor_shape_unsafe(std::vector - // const &); + flexflow_tensor_shape_list_t + *out); // std::vector + // get_tensor_shape_unsafe(std::vector + // const &); optional to_internal(flexflow_param_sync_t); flexflow_param_sync_t to_external(optional); diff --git a/lib/op-attrs/ffi/src/op-attrs.cc b/lib/op-attrs/ffi/src/op-attrs.cc index 8796118a99..7bc9b49d93 100644 --- a/lib/op-attrs/ffi/src/op-attrs.cc +++ 

diff --git a/lib/op-attrs/ffi/internal/internal/op-attrs.h b/lib/op-attrs/ffi/internal/internal/op-attrs.h
index d5b08a2a3f..f8e24b037b 100644
--- a/lib/op-attrs/ffi/internal/internal/op-attrs.h
+++ b/lib/op-attrs/ffi/internal/internal/op-attrs.h
@@ -264,11 +264,49 @@ flexflow_error_t flexflow_get_tensor_shape_unsafe(

 flexflow_error_t flexflow_get_tensor_shape_unsafe(
     flexflow_parallel_tensor_shape_list_t,
+    int n,
     flexflow_tensor_shape_list_t
         *out); // std::vector<TensorShape>
                // get_tensor_shape_unsafe(std::vector<ParallelTensorShape>
                // const &);

+/*
+flexflow_error_t flexflow_computation_graph_add_op_aggregate(
+    flexflow_computation_graph_t cg,
+    flexflow_tensor_t gate_preds,
+    flexflow_tensor_t gate_assign,
+    flexflow_tensor_t true_gate_assign,
+    flexflow_tensor_t full_gate_gradients,
+    flexflow_tensor_t *exp_preds,
+    int n,
+    float lambda_bal,
+    flexflow_tensor_t *out,
+    char *name) {
+  return handle_errors(out, [&] {
+    return aggregate(deref_opaque(cg),
+                     c_deref_opaque(gate_preds),
+                     c_deref_opaque(gate_assign),
+                     c_deref_opaque(true_gate_assign),
+                     c_deref_opaque(full_gate_gradients),
+                     c_deref_opaque_list(exp_preds, n),
+                     lambda_bal,
+                     maybe_string(name));
+  });
+}
+This corresponds to the function tensor_guid_t
+    insert_aggregate_layer(ComputationGraph &,
+                           tensor_guid_t const &gate_preds,
+                           tensor_guid_t const &gate_assign,
+                           tensor_guid_t const &true_gate_assign,
+                           tensor_guid_t const &full_gate_gradients,
+                           std::vector<tensor_guid_t> const &exp_preds,
+                           float lambda_bal,
+                           optional<std::string> const &name = nullopt);
+
+For parameters that involve a std::vector, follow the function above: the pair
+flexflow_tensor_t *exp_preds, int n is combined into
+std::vector<tensor_guid_t> const &exp_preds.
+*/
+
 optional<ParamSync> to_internal(flexflow_param_sync_t);
 flexflow_param_sync_t to_external(optional<ParamSync>);

From 06555cfd7e3c09d69bff297cf5b287dc7df29a1b Mon Sep 17 00:00:00 2001
From: lambda7xx
Date: Mon, 21 Aug 2023 02:42:44 +0000
Subject: [PATCH 09/13] replace shape_list_t with shape_t * and int

---
 lib/op-attrs/ffi/internal/internal/op-attrs.h | 56 ++++---------------
 1 file changed, 11 insertions(+), 45 deletions(-)
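
Note: with this change a vector-valued parameter crosses the FFI boundary as a
(pointer, count) pair, mirroring the aggregate example quoted in the previous
patch. A sketch of a call site (all handles assumed to have been created
elsewhere):

    flexflow_parallel_tensor_shape_t exp_preds[4] = /* ... */;
    flexflow_parallel_tensor_shape_t out;
    flexflow_error_t err = flexflow_get_output_shape(attrs,
                                                     gate_preds,
                                                     &out,
                                                     gate_assign,
                                                     true_gate_assign,
                                                     full_gate_gradients,
                                                     exp_preds,
                                                     4);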

diff --git a/lib/op-attrs/ffi/internal/internal/op-attrs.h b/lib/op-attrs/ffi/internal/internal/op-attrs.h
index f8e24b037b..f6ed7818be 100644
--- a/lib/op-attrs/ffi/internal/internal/op-attrs.h
+++ b/lib/op-attrs/ffi/internal/internal/op-attrs.h
@@ -110,13 +110,16 @@ flexflow_error_t
                               flexflow_parallel_tensor_shape_t,
                               flexflow_parallel_tensor_shape_t,
                               flexflow_parallel_tensor_shape_t,
-                              flexflow_parallel_tensor_shape_list_t);
+                              flexflow_parallel_tensor_shape_t *,
+                              int num_exp_preds);
 flexflow_error_t flexflow_is_valid(flexflow_aggregate_attrs_t,
                                    flexflow_parallel_tensor_shape_t,
-                                   bool *out flexflow_parallel_tensor_shape_t,
+                                   bool *out ,
+                                   flexflow_parallel_tensor_shape_t,
                                    flexflow_parallel_tensor_shape_t,
                                    flexflow_parallel_tensor_shape_t,
-                                   flexflow_parallel_tensor_shape_list_t);
+                                   flexflow_parallel_tensor_shape_t *,
+                                   int num_exp_preds);
@@ -173,7 +176,8 @@ flexflow_error_t
                               flexflow_parallel_tensor_shape_t,
                               flexflow_parallel_tensor_shape_t,
                               flexflow_parallel_tensor_shape_t,
-                              flexflow_parallel_tensor_shape_list_t);
+                              flexflow_parallel_tensor_shape_t *,
+                              int num_exp_preds);

 flexflow_error_t flexflow_get_output_shape(
     flexflow_batchnorm_attrs_t,
@@ -263,50 +267,12 @@ flexflow_error_t flexflow_get_tensor_shape_unsafe(
 flexflow_error_t flexflow_get_tensor_shape_unsafe(
-    flexflow_parallel_tensor_shape_list_t,
-    int n,
-    flexflow_tensor_shape_list_t
-        *out); // std::vector<TensorShape>
-               // get_tensor_shape_unsafe(std::vector<ParallelTensorShape>
-               // const &);
+    flexflow_parallel_tensor_shape_t * input,
+    int num_input,
+    flexflow_tensor_shape_list_t *out); // std::vector<TensorShape>
+                                        // get_tensor_shape_unsafe(std::vector<ParallelTensorShape>
+                                        // const &);

From 73329c54002fe533043a1379aaa21336fd8336b2 Mon Sep 17 00:00:00 2001
From: lambda7xx
Date: Mon, 21 Aug 2023 03:25:04 +0000
Subject: [PATCH 10/13] implement most of the functions

---
 lib/op-attrs/ffi/internal/internal/op-attrs.h |  57 ++--
 lib/op-attrs/ffi/src/op-attrs.cc              | 319 ++++++++++++++++++
 2 files changed, 350 insertions(+), 26 deletions(-)
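
Note: the implementations below all funnel through handle_errors, which is
assumed (following the pcg ffi referenced earlier) to behave roughly like this
sketch: run the callable, wrap its result into the out-parameter, and translate
any thrown FlexFlow exception into the returned status. new_opaque and to_error
are hypothetical names for the wrapping and translation steps:

    template <typename Opaque, typename F>
    flexflow_error_t handle_errors(Opaque *out, F const &f) {
      try {
        *out = new_opaque(f());
      } catch (flexflow_utils_exception_t const &e) {
        return to_error(e);
      }
      return status_ok();
    }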

diff --git a/lib/op-attrs/ffi/internal/internal/op-attrs.h b/lib/op-attrs/ffi/internal/internal/op-attrs.h
index f6ed7818be..b00c4b6fca 100644
--- a/lib/op-attrs/ffi/internal/internal/op-attrs.h
+++ b/lib/op-attrs/ffi/internal/internal/op-attrs.h
@@ -51,7 +51,8 @@ using namespace FlexFlow;

 REGISTER_OPAQUE(flexflow_regularizer_attrs_t, optional<RegularizerAttrs>);
 REGISTER_OPAQUE(flexflow_ff_dim_t, ff_dim_t);
-// REGISTER_OPAQUE(flexflow_dim_ordered_t, DimOrdered);
+// REGISTER_OPAQUE(flexflow_dim_ordered_t, DimOrdered); Note: how to define
+// DimOrdered?
 REGISTER_OPAQUE(flexflow_parallel_dim_t, ParallelDim);
@@ -114,13 +115,23 @@ flexflow_error_t
                                    int num_exp_preds);
 flexflow_error_t flexflow_is_valid(flexflow_aggregate_attrs_t,
                                    flexflow_parallel_tensor_shape_t,
-                                   bool *out ,
+                                   bool *out,
                                    flexflow_parallel_tensor_shape_t,
                                    flexflow_parallel_tensor_shape_t,
                                    flexflow_parallel_tensor_shape_t,
                                    flexflow_parallel_tensor_shape_t *,
                                    int num_exp_preds);

+flexflow_error_t
+    flexflow_get_output_shape(flexflow_aggregate_attrs_t,
+                              flexflow_parallel_tensor_shape_t,
+                              flexflow_parallel_tensor_shape_t *out,
+                              flexflow_parallel_tensor_shape_t,
+                              flexflow_parallel_tensor_shape_t,
+                              flexflow_parallel_tensor_shape_t,
+                              flexflow_parallel_tensor_shape_t *,
+                              int num_exp_preds);
+
 flexflow_error_t flexflow_get_qProjSize(flexflow_multihead_attention_attrs_t,
                                         int *out);
 flexflow_error_t flexflow_get_vProjSize(flexflow_multihead_attention_attrs_t,
@@ -143,41 +154,29 @@ flexflow_error_t flexflow_get_qoSeqLength(
     flexflow_multihead_attention_inputs_parallel_tensor_shape_t, int *out);
 flexflow_error_t flexflow_get_kvSeqLength(
     flexflow_multihead_attention_inputs_parallel_tensor_shape_t, int *out);
-flexflow_error_t flexflow_get_kvSeqLength(
-    flexflow_multihead_attention_inputs_parallel_tensor_shape_t, int *out);

 flexflow_error_t flexflow_get_num_samples(
     flexflow_multihead_attention_inputs_parallel_tensor_shape_t, int *out);

 flexflow_error_t flexflow_get_weights_shape(
     flexflow_multihead_attention_attrs_t,
-    flexflow_tensor_shape_t *out
-        flexflow_multihead_attention_inputs_tensor_shape_t);
+    flexflow_multihead_attention_inputs_tensor_shape_t,
+    flexflow_tensor_shape_t *out);

 flexflow_error_t flexflow_get_weights_shape(
     flexflow_multihead_attention_attrs_t,
-    flexflow_parallel_tensor_shape_t *out
-        flexflow_multihead_attention_inputs_parallel_tensor_shape_t);
+    flexflow_multihead_attention_inputs_parallel_tensor_shape_t,
+    flexflow_parallel_tensor_shape_t *out);

 flexflow_error_t flexflow_get_output_shape(
     flexflow_multihead_attention_attrs_t,
-    flexflow_parallel_tensor_shape_t *out,
-    flexflow_multihead_attention_inputs_tensor_shape_t);
+    flexflow_multihead_attention_inputs_tensor_shape_t,
+    flexflow_parallel_tensor_shape_t *out);

 flexflow_error_t flexflow_get_output_shape(
     flexflow_multihead_attention_attrs_t,
-    flexflow_tensor_shape_t *out,
-    flexflow_multihead_attention_inputs_tensor_shape_t);
-
-flexflow_error_t
-    flexflow_get_output_shape(flexflow_aggregate_attrs_t,
-                              flexflow_parallel_tensor_shape_t,
-                              flexflow_parallel_tensor_shape_t *out,
-                              flexflow_parallel_tensor_shape_t,
-                              flexflow_parallel_tensor_shape_t,
-                              flexflow_parallel_tensor_shape_t,
-                              flexflow_parallel_tensor_shape_t *,
-                              int num_exp_preds);
+    flexflow_multihead_attention_inputs_tensor_shape_t,
+    flexflow_tensor_shape_t *out);

 flexflow_error_t flexflow_get_output_shape(
     flexflow_batchnorm_attrs_t,
@@ -199,8 +198,8 @@ flexflow_error_t flexflow_get_bias_shape(
 flexflow_error_t flexflow_get_weights_shape(
     flexflow_embedding_attrs_t,
     flexflow_tensor_shape_t *out,
-    flexflow_tensor_shape); // TensorShape get_weights_shape(EmbeddingAttrs
-                            // const &, TensorShape const &);
+    flexflow_tensor_shape_t); // TensorShape get_weights_shape(EmbeddingAttrs
+                              // const &, TensorShape const &);
@@ -230,6 +230,11 @@ flexflow_error_t flexflow_get_loss_function(

+flexflow_error_t flexflow_is_valid(flexflow_parallel_dim_t, bool *out);
+
+flexflow_error_t flexflow_is_replica_dim(flexflow_parallel_dim_t, bool *out);
+
 flexflow_error_t
     flexflow_is_valid(flexflow_parallel_tensor_dims_t,
                       bool *out); // bool is_valid(ParallelTensorDims const &);
@@ -271,9 +271,10 @@ flexflow_error_t flexflow_get_tensor_shape_unsafe(
 flexflow_error_t flexflow_get_tensor_shape_unsafe(
-    flexflow_parallel_tensor_shape_t * input,
+    flexflow_parallel_tensor_shape_t *input,
     int num_input,
-    flexflow_tensor_shape_list_t *out); // std::vector<TensorShape>
+    flexflow_tensor_shape_list_t
+        *out); // std::vector<TensorShape>
                // get_tensor_shape_unsafe(std::vector<ParallelTensorShape>
                // const &);
-                            // const &, TensorShape const &);
+    flexflow_tensor_shape_t); // TensorShape get_weights_shape(EmbeddingAttrs
+                              // const &, TensorShape const &);

 // has some problem on loss_function.h
 // how to define LossFunction in loss_function.h
@@ -231,6 +230,11 @@ flexflow_error_t flexflow_get_loss_function(
 // Note(lambda): have to define all
 // get_output_shape(op-attrs/include/op-attrs/get_output_shapes.h)?

+flexflow_error_t flexflow_is_valid(flexflow_parallel_dim_t, bool *out);
+
+flexflow_error_t flexflow_is_replica_dim(flexflow_parallel_dim_t, bool *out);
+
 flexflow_error_t flexflow_is_valid(flexflow_parallel_tensor_dims_t, bool *out);
 // bool is_valid(ParallelTensorDims const &);
@@ -267,9 +271,10 @@ flexflow_error_t flexflow_get_tensor_shape_unsafe(
     // &);

 flexflow_error_t flexflow_get_tensor_shape_unsafe(
-    flexflow_parallel_tensor_shape_t * input,
+    flexflow_parallel_tensor_shape_t *input,
     int num_input,
-    flexflow_tensor_shape_list_t *out); // std::vector
+    flexflow_tensor_shape_list_t
+        *out); // std::vector
     // get_tensor_shape_unsafe(std::vector
     // const &);
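The implementations that follow all reconstruct a std::vector from such a pointer-plus-count pair via c_deref_opaque_list. That helper is not shown anywhere in this series; a plausible sketch, assuming c_deref_opaque maps one opaque handle to its internal object (the name, template shape, and return type here are guesses, not the actual utility code):

#include <type_traits>
#include <vector>

// Sketch only: collect the internal objects behind an array of opaque
// handles into the std::vector that the internal op-attrs API expects.
template <typename ExternalT>
auto c_deref_opaque_list(ExternalT const *start, int num_elements) {
  using InternalT = std::decay_t<decltype(c_deref_opaque(*start))>;
  std::vector<InternalT> result;
  result.reserve(num_elements);
  for (int i = 0; i < num_elements; i++) {
    result.push_back(c_deref_opaque(start[i]));
  }
  return result;
}

This is exactly the flattening described in the removed comment in the previous patch: a flexflow_*_t *list plus an int count on the C side becomes one std::vector const & argument on the C++ side.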
diff --git a/lib/op-attrs/ffi/src/op-attrs.cc b/lib/op-attrs/ffi/src/op-attrs.cc
index 7bc9b49d93..344bbd45ed 100644
--- a/lib/op-attrs/ffi/src/op-attrs.cc
+++ b/lib/op-attrs/ffi/src/op-attrs.cc
@@ -216,6 +216,325 @@ REGISTER_FFI_ENUM(flexflow_op_type_t,

 flexflow_error_t make_opattrs_error(flexflow_opattrs_error_code_t);

+flexflow_error_t flexflow_get_output_shape(
+    flexflow_aggregate_specattrs_t aggregate_spec_attrs,
+    flexflow_parallel_tensor_shape_t gate_preds,
+    flexflow_parallel_tensor_shape_t *out,
+    flexflow_parallel_tensor_shape_t gate_assign,
+    flexflow_parallel_tensor_shape_t true_gate_assign,
+    flexflow_parallel_tensor_shape_t full_gate_gradients,
+    flexflow_parallel_tensor_shape_t *exp_preds,
+    int num_exp_preds) {
+  return handle_errors(out, [&] {
+    return get_output_shape(deref_opaque(aggregate_spec_attrs),
+                            deref_opaque(gate_preds),
+                            deref_opaque(gate_assign),
+                            deref_opaque(true_gate_assign),
+                            deref_opaque(full_gate_gradients),
+                            c_deref_opaque_list(exp_preds, num_exp_preds));
+  });
+}
+
+flexflow_error_t flexflow_is_valid(
+    flexflow_aggregate_attrs_t aggregate_attrs,
+    flexflow_parallel_tensor_shape_t gate_preds,
+    bool *out,
+    flexflow_parallel_tensor_shape_t gate_assign,
+    flexflow_parallel_tensor_shape_t true_gate_assign,
+    flexflow_parallel_tensor_shape_t full_gate_gradients,
+    flexflow_parallel_tensor_shape_t *exp_preds,
+    int num_exp_preds) {
+  return handle_errors(out, [&] {
+    return is_valid(deref_opaque(aggregate_attrs),
+                    deref_opaque(gate_preds),
+                    deref_opaque(gate_assign),
+                    deref_opaque(true_gate_assign),
+                    deref_opaque(full_gate_gradients),
+                    c_deref_opaque_list(exp_preds, num_exp_preds));
+  });
+}
+
+flexflow_error_t flexflow_get_output_shape(
+    flexflow_aggregate_attrs_t aggregate_attrs,
+    flexflow_parallel_tensor_shape_t gate_preds,
+    flexflow_parallel_tensor_shape_t *out,
+    flexflow_parallel_tensor_shape_t gate_assign,
+    flexflow_parallel_tensor_shape_t true_gate_assign,
+    flexflow_parallel_tensor_shape_t full_gate_gradients,
+    flexflow_parallel_tensor_shape_t *exp_preds,
+    int num_exp_preds) {
+  return handle_errors(out, [&] {
+    return get_output_shape(deref_opaque(aggregate_attrs),
+                            deref_opaque(gate_preds),
+                            deref_opaque(gate_assign),
+                            deref_opaque(true_gate_assign),
+                            deref_opaque(full_gate_gradients),
+                            c_deref_opaque_list(exp_preds, num_exp_preds));
+  });
+}
+
+flexflow_error_t flexflow_get_kProjSize(
+    flexflow_multihead_attention_attrs_t multi_head_attention_attrs,
+    int *out) {
+  return handle_errors(out, [&] {
+    return get_kProjSize(deref_opaque(multi_head_attention_attrs));
+  });
+}
+
+flexflow_error_t flexflow_get_vProjSize(
+    flexflow_multihead_attention_attrs_t multi_head_attention_attrs,
+    int *out) {
+  return handle_errors(out, [&] {
+    return get_vProjSize(deref_opaque(multi_head_attention_attrs));
+  });
+}
+
+flexflow_error_t flexflow_get_qProjSize(
+    flexflow_multihead_attention_attrs_t multi_head_attention_attrs,
+    int *out) {
+  return handle_errors(out, [&] {
+    return get_qProjSize(deref_opaque(multi_head_attention_attrs));
+  });
+}
+
+flexflow_error_t flexflow_get_oProjSize(
+    flexflow_multihead_attention_attrs_t multi_head_attention_attrs,
+    int *out) {
+  return handle_errors(out, [&] {
+    return get_oProjSize(deref_opaque(multi_head_attention_attrs));
+  });
+}
+
+flexflow_error_t flexflow_get_qSize(
+    flexflow_multihead_attention_inputs_parallel_tensor_shape_t
+        multi_head_attention_inputs,
+    int *out) {
+  return handle_errors(out, [&] {
+    return get_qSize(deref_opaque(multi_head_attention_inputs));
+  });
+}
+
+flexflow_error_t flexflow_get_kSize(
+    flexflow_multihead_attention_inputs_parallel_tensor_shape_t
+        multi_head_attention_inputs,
+    int *out) {
+  return handle_errors(out, [&] {
+    return get_kSize(deref_opaque(multi_head_attention_inputs));
+  });
+}
+
+flexflow_error_t flexflow_get_vSize(
+    flexflow_multihead_attention_inputs_parallel_tensor_shape_t
+        multi_head_attention_inputs,
+    int *out) {
+  return handle_errors(out, [&] {
+    return get_vSize(deref_opaque(multi_head_attention_inputs));
+  });
+}
+
+flexflow_error_t flexflow_get_oSize(
+    flexflow_multihead_attention_inputs_parallel_tensor_shape_t
+        multi_head_attention_inputs,
+    int *out) {
+  return handle_errors(out, [&] {
+    return get_oSize(deref_opaque(multi_head_attention_inputs));
+  });
+}
+
+flexflow_error_t flexflow_get_qoSeqLength(
+    flexflow_multihead_attention_inputs_parallel_tensor_shape_t
+        multi_head_attention_inputs,
+    int *out) {
+  return handle_errors(out, [&] {
+    return get_qoSeqLength(deref_opaque(multi_head_attention_inputs));
+  });
+}
+
+flexflow_error_t flexflow_get_kvSeqLength(
+    flexflow_multihead_attention_inputs_parallel_tensor_shape_t
+        multi_head_attention_inputs,
+    int *out) {
+  return handle_errors(out, [&] {
+    return get_kvSeqLength(deref_opaque(multi_head_attention_inputs));
+  });
+}
+
+flexflow_error_t flexflow_get_num_samples(
+    flexflow_multihead_attention_inputs_parallel_tensor_shape_t
+        multi_head_attention_inputs,
+    int *out) {
+  return handle_errors(out, [&] {
+    return get_num_samples(deref_opaque(multi_head_attention_inputs));
+  });
+}
+
+flexflow_error_t flexflow_get_weights_shape(
+    flexflow_multihead_attention_attrs_t multi_head_attention_attrs,
+    flexflow_multihead_attention_inputs_tensor_shape_t
+        multi_head_attention_inputs,
+    flexflow_tensor_shape_t *out) {
+  return handle_errors(out, [&] {
+    return get_weights_shape(deref_opaque(multi_head_attention_attrs),
+                             deref_opaque(multi_head_attention_inputs));
+  });
+}
+
+flexflow_error_t flexflow_get_weights_shape(
+    flexflow_multihead_attention_attrs_t multi_head_attention_attrs,
+    flexflow_multihead_attention_inputs_parallel_tensor_shape_t
+        multi_head_attention_inputs,
+    flexflow_parallel_tensor_shape_t *out) {
+  return handle_errors(out, [&] {
+    return get_weights_shape(deref_opaque(multi_head_attention_attrs),
+                             deref_opaque(multi_head_attention_inputs));
+  });
+}
+
+flexflow_error_t flexflow_get_output_shape(
+    flexflow_multihead_attention_attrs_t multi_head_attention_attrs,
+    flexflow_multihead_attention_inputs_tensor_shape_t
+        multi_head_attention_inputs,
+    flexflow_parallel_tensor_shape_t *out) {
+  return handle_errors(out, [&] {
+    return get_output_shape(deref_opaque(multi_head_attention_attrs),
+                            deref_opaque(multi_head_attention_inputs));
+  });
+}
+
+flexflow_error_t flexflow_get_output_shape(
+    flexflow_multihead_attention_attrs_t multi_head_attention_attrs,
+    flexflow_multihead_attention_inputs_tensor_shape_t
+        multi_head_attention_inputs,
+    flexflow_tensor_shape_t *out) {
+  return handle_errors(out, [&] {
+    return get_output_shape(deref_opaque(multi_head_attention_attrs),
+                            deref_opaque(multi_head_attention_inputs));
+  });
+}
+
+flexflow_error_t
+    flexflow_get_output_shape(flexflow_batchnorm_attrs_t batchnorm_attrs,
+                              flexflow_parallel_tensor_shape_t *out) {
+  return handle_errors(out, [&] {
+    return get_output_shape(deref_opaque(batchnorm_attrs));
+  });
+}
+
+flexflow_error_t
+    flexflow_get_kernel_shape(flexflow_conv2d_attrs_t conv2d_attrs,
+                              flexflow_tensor_shape_t *out,
+                              flexflow_tensor_shape_t input_shape) {
+  return handle_errors(out, [&] {
+    return get_kernel_shape(deref_opaque(conv2d_attrs),
+                            deref_opaque(input_shape));
+  });
+}
+
+flexflow_error_t flexflow_get_bias_shape(flexflow_conv2d_attrs_t conv2d_attrs,
+                                         flexflow_tensor_shape_t *out,
+                                         flexflow_tensor_shape_t input_shape) {
+  return handle_errors(out, [&] {
+    return get_bias_shape(deref_opaque(conv2d_attrs),
+                          deref_opaque(input_shape));
+  });
+}
+
+flexflow_error_t
+    flexflow_get_weights_shape(flexflow_embedding_attrs_t embedding_attrs,
+                               flexflow_tensor_shape_t *out,
+                               flexflow_tensor_shape_t input_shape) {
+  return handle_errors(out, [&] {
+    return get_weights_shape(deref_opaque(embedding_attrs),
+                             deref_opaque(input_shape));
+  });
+}
+
+flexflow_error_t
+    flexflow_parse_loss_function_name(char **raw_name,
+                                      flexflow_loss_function_t *out) {
+  NOT_IMPLEMENTED(); // Note(lambda): how to implement this function?
+}
+
+flexflow_error_t flexflow_is_valid(flexflow_parallel_dim_t parallel_dim,
+                                   bool *out) {
+  return handle_errors(out, [&] {
+    return is_valid(deref_opaque(parallel_dim));
+  });
+}
+
+flexflow_error_t flexflow_is_replica_dim(flexflow_parallel_dim_t parallel_dim,
+                                         bool *out) {
+  return handle_errors(out, [&] {
+    return is_replica_dim(deref_opaque(parallel_dim));
+  });
+}
+
+flexflow_error_t
+    flexflow_is_valid(flexflow_parallel_tensor_dims_t parallel_tensor_dims,
+                      bool *out) {
+  return handle_errors(out, [&] {
+    return is_valid(deref_opaque(parallel_tensor_dims));
+  });
+}
+
+flexflow_error_t flexflow_get_piece_dims(
+    flexflow_parallel_tensor_dims_t parallel_tensor_dims,
+    flexflow_tensor_dims_t *out) {
+  return handle_errors(out, [&] {
+    return get_piece_dims(deref_opaque(parallel_tensor_dims));
+  });
+}
+
+flexflow_error_t flexflow_get_tensor_dims_unsafe(
+    flexflow_parallel_tensor_dims_t parallel_tensor_dims,
+    flexflow_tensor_dims_t *out) {
+  return handle_errors(out, [&] {
+    return get_tensor_dims_unsafe(deref_opaque(parallel_tensor_dims));
+  });
+}
+
+flexflow_error_t flexflow_get_piece_shape(
+    flexflow_parallel_tensor_shape_t parallel_tensor_shape,
+    flexflow_tensor_shape_t *out) {
+  return handle_errors(out, [&] {
+    return get_piece_shape(deref_opaque(parallel_tensor_shape));
+  });
+}
+
+flexflow_error_t flexflow_get_num_replica_dims(
+    flexflow_parallel_tensor_shape_t parallel_tensor_shape, int *out) {
+  return handle_errors(out, [&] {
+    return get_num_replica_dims(deref_opaque(parallel_tensor_shape));
+  });
+}
+
+flexflow_error_t flexflow_get_num_replicas(
+    flexflow_parallel_tensor_shape_t parallel_tensor_shape, int *out) {
+  return handle_errors(out, [&] {
+    return get_num_replicas(deref_opaque(parallel_tensor_shape));
+  });
+}
+
+flexflow_error_t
+    flexflow_is_valid(flexflow_parallel_tensor_shape_t parallel_tensor_shape,
+                      bool *out) {
+  return handle_errors(out, [&] {
+    return is_valid(deref_opaque(parallel_tensor_shape));
+  });
+}
+
+flexflow_error_t flexflow_get_tensor_shape_unsafe(
+    flexflow_parallel_tensor_shape_t parallel_tensor_shape,
+    flexflow_tensor_shape_t *out) {
+  return handle_errors(out, [&] {
+    return get_tensor_shape_unsafe(deref_opaque(parallel_tensor_shape));
+  });
+}
+
+flexflow_error_t
+    flexflow_get_tensor_shape_unsafe(flexflow_parallel_tensor_shape_t *input,
+                                     int num_input,
+                                     flexflow_tensor_shape_list_t *out) {
+  return handle_errors(out, [&] {
+    return get_tensor_shape_unsafe(c_deref_opaque_list(input, num_input));
+  });
+}
+
 ParamSync to_internal(flexflow_param_sync_t e) {
   return to_internal_impl(e);
 }
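Every wrapper in the patch above has the same shape: run the internal call inside a lambda, write its externalized result through the out-parameter, and report failure as a flexflow_error_t rather than letting an exception cross the C boundary. handle_errors itself is defined elsewhere in the FFI internals and is not part of this series; a rough sketch under those assumptions (the exception type, its err_code field, and a per-result-type to_external overload are all guesses):

// Sketch only, not the series' actual shared implementation.
template <typename OutT, typename F>
flexflow_error_t handle_errors(OutT *out, F &&f) {
  try {
    // Assumes a to_external overload exists for the lambda's result type
    // (enum conversion, or wrapping the result in a new opaque handle).
    *out = to_external(f());
  } catch (opattrs_exception_t const &e) { // hypothetical internal exception
    return make_opattrs_error(e.err_code); // hypothetical error-code field
  }
  return status_ok();
}

The key property is that the C ABI never sees a C++ exception: every public entry point returns a status value, and results travel exclusively through out-parameters.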
From ea1512700ef0ce6ac411741f6dfb4c18aba9db88 Mon Sep 17 00:00:00 2001
From: lambda7xx
Date: Mon, 21 Aug 2023 03:26:57 +0000
Subject: [PATCH 11/13] remove the comment and format the code

---
 lib/op-attrs/ffi/internal/internal/op-attrs.h | 128 +++++++-----------
 1 file changed, 48 insertions(+), 80 deletions(-)

diff --git a/lib/op-attrs/ffi/internal/internal/op-attrs.h b/lib/op-attrs/ffi/internal/internal/op-attrs.h
index b00c4b6fca..7e7c168ba5 100644
--- a/lib/op-attrs/ffi/internal/internal/op-attrs.h
+++ b/lib/op-attrs/ffi/internal/internal/op-attrs.h
@@ -178,52 +178,34 @@ flexflow_error_t flexflow_get_output_shape(
     flexflow_multihead_attention_inputs_tensor_shape_t,
     flexflow_tensor_shape_t *out);

-flexflow_error_t flexflow_get_output_shape(
-    flexflow_batchnorm_attrs_t,
-    flexflow_parallel_tensor_shape_t
-        *out); // ParallelTensorShape get_output_shape(BatchNormAttrs const &);
-
-flexflow_error_t flexflow_get_kernel_shape(
-    flexflow_conv2d_attrs_t,
-    flexflow_tensor_shape_t *out,
-    flexflow_tensor_shape_t); // TensorShape get_kernel_shape(Conv2DAttrs const
-                              // &, TensorShape const &);
-
-flexflow_error_t flexflow_get_bias_shape(
-    flexflow_conv2d_attrs_t,
-    flexflow_tensor_shape_t *out,
-    flexflow_tensor_shape_t); // TensorShape get_bias_shape(Conv2DAttrs const &,
-                              // TensorShape const &);
+flexflow_error_t
+    flexflow_get_output_shape(flexflow_batchnorm_attrs_t,
+                              flexflow_parallel_tensor_shape_t *out);

-flexflow_error_t flexflow_get_weights_shape(
-    flexflow_embedding_attrs_t,
-    flexflow_tensor_shape_t *out,
-    flexflow_tensor_shape_t); // TensorShape get_weights_shape(EmbeddingAttrs
-                              // const &, TensorShape const &);
-
-// has some problem on loss_function.h
-// how to define LossFunction in loss_function.h
-flexflow_error_t flexflow_parse_loss_function_name(
-    char **,
-    flexflow_loss_function_t
-        *out); // LossFunction parse_loss_function_name(std::string const &);
+flexflow_error_t flexflow_get_kernel_shape(flexflow_conv2d_attrs_t,
+                                           flexflow_tensor_shape_t *out,
+                                           flexflow_tensor_shape_t);

-flexflow_error_t flexflow_get_loss_function(
-    flexflow_other_loss_attrs_t,
-    flexflow_loss_function_t
-        *out); // LossFunction get_loss_function(OtherLossAttrs const &);
+flexflow_error_t flexflow_get_bias_shape(flexflow_conv2d_attrs_t,
+                                         flexflow_tensor_shape_t *out,
+                                         flexflow_tensor_shape_t);
+
+flexflow_error_t flexflow_get_weights_shape(flexflow_embedding_attrs_t,
+                                            flexflow_tensor_shape_t *out,
+                                            flexflow_tensor_shape_t);
+
+flexflow_error_t
+    flexflow_parse_loss_function_name(char **, flexflow_loss_function_t *out);
+
+flexflow_error_t flexflow_get_loss_function(flexflow_other_loss_attrs_t,
+                                            flexflow_loss_function_t *out);

 flexflow_error_t flexflow_get_loss_function(
     flexflow_sparse_categorical_crossentropy_loss_attrs_t,
-    flexflow_loss_function_t
-        *out); // LossFunction
-               // get_loss_function(SparseCategoricalCrossEntropyLossAttrs
-               // const &);
+    flexflow_loss_function_t *out);

-flexflow_error_t flexflow_get_loss_function(
-    flexflow_loss_attrs_t,
-    flexflow_loss_function_t
-        *out); // LossFunction get_loss_function(LossAttrs const &);
+flexflow_error_t flexflow_get_loss_function(flexflow_loss_attrs_t,
+                                            flexflow_loss_function_t *out);

 // TODO(lambda): how to define inner_to_outer_idxs, outer_to_inner_idxs, how
 // to define DimOrdered outer_to_inner(op-attrs/include/op-attrs/dim_ordered.h)
@@ -235,48 +217,34 @@ flexflow_error_t flexflow_is_valid(flexflow_parallel_dim_t, bool *out);

 flexflow_error_t flexflow_is_replica_dim(flexflow_parallel_dim_t, bool *out);

+flexflow_error_t flexflow_is_valid(flexflow_parallel_tensor_dims_t, bool *out);
+
+flexflow_error_t flexflow_get_piece_dims(flexflow_parallel_tensor_dims_t,
+                                         flexflow_tensor_dims_t *out);
+
 flexflow_error_t
-    flexflow_is_valid(flexflow_parallel_tensor_dims_t,
-                      bool *out); // bool is_valid(ParallelTensorDims const &);
-
-flexflow_error_t flexflow_get_piece_dims(
-    flexflow_parallel_tensor_dims_t,
-    flexflow_tensor_dims_t
-        *out); // TensorDims get_piece_dims(ParallelTensorDims const &);
-
-flexflow_error_t flexflow_get_tensor_dims_unsafe(
-    flexflow_parallel_tensor_dims_t,
-    flexflow_tensor_dims_t
-        *out); // TensorDims get_tensor_dims_unsafe(ParallelTensorDims const &);
-
-flexflow_error_t flexflow_get_piece_shape(
-    flexflow_parallel_tensor_shape_t,
-    flexflow_tensor_shape_t
-        *out); // TensorShape get_piece_shape(ParallelTensorShape const &);
-
-flexflow_error_t flexflow_get_num_replica_dims(
-    flexflow_parallel_tensor_shape_t,
-    int *out); // int get_num_replica_dims(ParallelTensorShape const &);
-flexflow_error_t flexflow_get_num_replicas(
-    flexflow_parallel_tensor_shape_t,
-    int *out); // int get_num_replicas(ParallelTensorShape const &);
+    flexflow_get_tensor_dims_unsafe(flexflow_parallel_tensor_dims_t,
+                                    flexflow_tensor_dims_t *out);
+
+flexflow_error_t flexflow_get_piece_shape(flexflow_parallel_tensor_shape_t,
+                                          flexflow_tensor_shape_t *out);
+
+flexflow_error_t
+    flexflow_get_num_replica_dims(flexflow_parallel_tensor_shape_t, int *out);
+
+flexflow_error_t flexflow_get_num_replicas(flexflow_parallel_tensor_shape_t,
+                                           int *out);
+
+flexflow_error_t flexflow_is_valid(flexflow_parallel_tensor_shape_t, bool *out);
+
+flexflow_error_t
+    flexflow_get_tensor_shape_unsafe(flexflow_parallel_tensor_shape_t,
+                                     flexflow_tensor_shape_t *out);
+
 flexflow_error_t
-    flexflow_is_valid(flexflow_parallel_tensor_shape_t,
-                      bool *out); // bool is_valid(ParallelTensorShape const &);
-
-flexflow_error_t flexflow_get_tensor_shape_unsafe(
-    flexflow_parallel_tensor_shape_t,
-    flexflow_tensor_shape_t
-        *out); // TensorShape get_tensor_shape_unsafe(ParallelTensorShape const
-               // &);
-
-flexflow_error_t flexflow_get_tensor_shape_unsafe(
-    flexflow_parallel_tensor_shape_t *input,
-    int num_input,
-    flexflow_tensor_shape_list_t
-        *out); // std::vector
-               // get_tensor_shape_unsafe(std::vector
-               // const &);
+    flexflow_get_tensor_shape_unsafe(flexflow_parallel_tensor_shape_t *input,
+                                     int num_input,
+                                     flexflow_tensor_shape_list_t *out);

 optional to_internal(flexflow_param_sync_t);
 flexflow_param_sync_t to_external(optional);

From 8a5458c52c7b356873997d77ccf80b48fea009c4 Mon Sep 17 00:00:00 2001
From: lambda7xx
Date: Mon, 21 Aug 2023 03:48:52 +0000
Subject: [PATCH 12/13] add header files

---
 lib/op-attrs/ffi/internal/internal/op-attrs.h | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/lib/op-attrs/ffi/internal/internal/op-attrs.h b/lib/op-attrs/ffi/internal/internal/op-attrs.h
index 7e7c168ba5..4c0ffae80a 100644
--- a/lib/op-attrs/ffi/internal/internal/op-attrs.h
+++ b/lib/op-attrs/ffi/internal/internal/op-attrs.h
@@ -8,6 +8,8 @@
 #include "op-attrs/datatype.h"
 #include "op-attrs/dim_ordered.h"
 #include "op-attrs/ff_dim.h"
+#include "op-attrs/get_op_type.h"
+#include "op-attrs/get_output_shapes.h"
 #include "op-attrs/op.h"
 #include "op-attrs/ops/aggregate.h"
 #include "op-attrs/ops/aggregate_spec.h"

From 24c5a093ae7fd1ae110c4968889eb0e550b40a8f Mon Sep 17 00:00:00 2001
From: lambda7xx
Date: Mon, 21 Aug 2023 07:50:04 +0000
Subject: [PATCH 13/13] implement the flexflow_get_datatype_size and
 flexflow_operator_attrs_get_op_type

---
 lib/op-attrs/ffi/include/flexflow/op-attrs.h  |  4 +++-
 lib/op-attrs/ffi/internal/internal/op-attrs.h |  2 ++
 lib/op-attrs/ffi/src/op-attrs.cc              | 17 +++++++++++++++++
 3 files changed, 22 insertions(+), 1 deletion(-)

diff --git a/lib/op-attrs/ffi/include/flexflow/op-attrs.h b/lib/op-attrs/ffi/include/flexflow/op-attrs.h
index 20dc2d8d03..991fc9fd88 100644
--- a/lib/op-attrs/ffi/include/flexflow/op-attrs.h
+++ b/lib/op-attrs/ffi/include/flexflow/op-attrs.h
@@ -217,7 +217,9 @@ typedef enum { // does _not_ have to stay synchronized with op-attrs/op.h

 typedef struct {
   flexflow_op_type_t op_type;
   void *data;
-} flexflow_operator_attrs_t;
+} flexflow_operator_attrs;
+
+FF_NEW_OPAQUE_TYPE(flexflow_operator_attrs_t);

 flexflow_opattrs_error_t flexflow_get_datatype_size(flexflow_datatype_t,
                                                     int *out);
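For orientation, this last patch converts flexflow_operator_attrs_t from a plain struct into the same opaque-handle pattern used for every other type in the series. The FF_NEW_OPAQUE_TYPE / REGISTER_OPAQUE macros themselves live in the shared FFI utilities and are not shown here; roughly, and only as a sketch of the pattern (the impl member name and the trait used for the internal mapping are hypothetical):

// Public header side: an opaque handle that C callers pass by value.
// (Sketch; the actual macro may differ.)
#define FF_NEW_OPAQUE_TYPE(NAME)                                               \
  typedef struct NAME {                                                        \
    void *impl;                                                                \
  } NAME

// Internal side: record which internal type a handle wraps, so that
// unwrap_opaque / deref_opaque can cast impl back safely.
// (Sketch; the internal_type_of trait is hypothetical.)
#define REGISTER_OPAQUE(OPAQUE, INTERNAL)                                      \
  template <>                                                                  \
  struct internal_type_of<OPAQUE> {                                            \
    using type = INTERNAL;                                                     \
  }

The split keeps the public header free of C++ types while letting the internal translation unit recover the typed object behind each handle.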
diff --git a/lib/op-attrs/ffi/internal/internal/op-attrs.h b/lib/op-attrs/ffi/internal/internal/op-attrs.h
index 4c0ffae80a..b2ac652d04 100644
--- a/lib/op-attrs/ffi/internal/internal/op-attrs.h
+++ b/lib/op-attrs/ffi/internal/internal/op-attrs.h
@@ -106,6 +106,8 @@ REGISTER_OPAQUE(flexflow_softmax_attrs_t, SoftmaxAttrs);
 REGISTER_OPAQUE(flexflow_split_attrs_t, SplitAttrs);
 REGISTER_OPAQUE(flexflow_topk_attrs_t, TopKAttrs);

+REGISTER_OPAQUE(flexflow_operator_attrs_t, flexflow_operator_attrs);
+
 flexflow_error_t
     flexflow_get_output_shape(flexflow_aggregate_specattrs_t,
                               flexflow_parallel_tensor_shape_t,

diff --git a/lib/op-attrs/ffi/src/op-attrs.cc b/lib/op-attrs/ffi/src/op-attrs.cc
index 344bbd45ed..c28c4b624e 100644
--- a/lib/op-attrs/ffi/src/op-attrs.cc
+++ b/lib/op-attrs/ffi/src/op-attrs.cc
@@ -535,9 +535,25 @@ flexflow_error_t
   }
 }

+flexflow_opattrs_error_t
+    flexflow_get_datatype_size(flexflow_datatype_t datatype, int *out) {
+  return handle_errors(out, [&] {
+    return size_of(to_internal(datatype));
+  });
+}
+
+flexflow_opattrs_error_t
+    flexflow_operator_attrs_get_op_type(flexflow_operator_attrs_t op_attrs,
+                                        flexflow_op_type_t *out) {
+  return handle_errors(out, [&] {
+    return deref_opaque(op_attrs).op_type;
+  });
+}
+
 ParamSync to_internal(flexflow_param_sync_t e) {
   return to_internal_impl(e);
 }
+
 flexflow_param_sync_t to_external(ParamSync i) {
   return to_external_impl(i);
 }
@@ -552,6 +568,7 @@ flexflow_datatype_t to_external(DataType i) {
 optional to_internal(flexflow_activation_t e) {
   return to_internal_impl(e);
 }
+
 flexflow_activation_t to_external(optional i) {
   return to_external_impl(i);
 }
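Taken together, the series leaves the op-attrs FFI able to answer simple metadata queries from C through out-parameters and explicit error values. A small usage sketch for the datatype query added in this last patch, checked through the flexflow_opattrs_error_* surface from the first patch (the include path is assumed from the header's location in the tree; note that flexflow_opattrs_error_is_ok is still a stub that always reports false, and flexflow_opattrs_error_get_string is still NOT_IMPLEMENTED):

#include "flexflow/op-attrs.h"
#include <stdbool.h>
#include <stdio.h>

int main(void) {
  int size = 0;
  flexflow_opattrs_error_t err =
      flexflow_get_datatype_size(FLEXFLOW_DATATYPE_BOOL, &size);

  bool ok = false;
  flexflow_opattrs_error_is_ok(err, &ok); /* stubbed: currently sets ok = false */
  printf("size of FLEXFLOW_DATATYPE_BOOL: %d (ok=%d)\n", size, (int)ok);
  return 0;
}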