From 01d88683e8ebc910657d440f604902425d364112 Mon Sep 17 00:00:00 2001
From: westfish
Date: Tue, 20 Dec 2022 03:15:53 +0000
Subject: [PATCH 1/3] fix bug

---
 .../unimo-text/export_model.py | 20 +++++++++----------
 1 file changed, 9 insertions(+), 11 deletions(-)

diff --git a/examples/question_generation/unimo-text/export_model.py b/examples/question_generation/unimo-text/export_model.py
index 44ae51080a0f..b772fc303588 100644
--- a/examples/question_generation/unimo-text/export_model.py
+++ b/examples/question_generation/unimo-text/export_model.py
@@ -12,16 +12,14 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import os
 import argparse
+import os
+from pprint import pprint
 
 import paddle
-from pprint import pprint
-
-from paddlenlp.transformers import UNIMOLMHeadModel, UNIMOTokenizer
 from paddlenlp.ops import FasterUNIMOText
-
+from paddlenlp.transformers import UNIMOLMHeadModel, UNIMOTokenizer
 from paddlenlp.utils.log import logger
 
 
@@ -65,24 +63,24 @@ def do_predict(args):
         unimo_text,
         input_spec=[
             # input_ids
-            paddle.static.InputSpec(shape=[None, None], dtype="int64"),
+            paddle.static.InputSpec(shape=[None, None], dtype="int32"),
             # token_type_ids
-            paddle.static.InputSpec(shape=[None, None], dtype="int64"),
+            paddle.static.InputSpec(shape=[None, None], dtype="int32"),
             # attention_mask
             paddle.static.InputSpec(shape=[None, 1, None, None],
-                                    dtype="float64"),
+                                    dtype="float32"),
             # seq_len
-            paddle.static.InputSpec(shape=[None], dtype="int64"),
+            paddle.static.InputSpec(shape=[None], dtype="int32"),
             args.max_dec_len,
             args.min_dec_len,
             args.topk,
             args.topp,
-            args.num_beams,  # num_beams. Used for beam_search.
+            args.num_beams,  # num_beams. Used for beam_search.
             args.decoding_strategy,
             tokenizer.cls_token_id,  # cls/bos
             tokenizer.mask_token_id,  # mask/eos
             tokenizer.pad_token_id,  # pad
-            args.diversity_rate,  # diversity rate. Used for beam search.
+            args.diversity_rate,  # diversity rate. Used for beam search.
             args.temperature,
             args.num_return_sequences,
             args.length_penalty,

From 00ece2e770899217a787bbf49c36e1336a8a96e4 Mon Sep 17 00:00:00 2001
From: westfish
Date: Tue, 20 Dec 2022 04:14:27 +0000
Subject: [PATCH 2/3] update fix

---
 examples/question_generation/unimo-text/export_model.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/examples/question_generation/unimo-text/export_model.py b/examples/question_generation/unimo-text/export_model.py
index b772fc303588..ea8d9c1a2e40 100644
--- a/examples/question_generation/unimo-text/export_model.py
+++ b/examples/question_generation/unimo-text/export_model.py
@@ -63,14 +63,14 @@ def do_predict(args):
         unimo_text,
         input_spec=[
             # input_ids
-            paddle.static.InputSpec(shape=[None, None], dtype="int32"),
+            paddle.static.InputSpec(shape=[None, None], dtype="int64"),
             # token_type_ids
-            paddle.static.InputSpec(shape=[None, None], dtype="int32"),
+            paddle.static.InputSpec(shape=[None, None], dtype="int64"),
             # attention_mask
             paddle.static.InputSpec(shape=[None, 1, None, None],
                                     dtype="float32"),
             # seq_len
-            paddle.static.InputSpec(shape=[None], dtype="int32"),
+            paddle.static.InputSpec(shape=[None], dtype="int64"),
             args.max_dec_len,
             args.min_dec_len,
             args.topk,

From 9e0984b5cf0379869f0ec61491f05ec8945410d1 Mon Sep 17 00:00:00 2001
From: westfish
Date: Tue, 20 Dec 2022 06:57:47 +0000
Subject: [PATCH 3/3] update unimo infer with summarization

---
 .../text_summarization/unimo-text/export_model.py | 14 ++++++--------
 1 file changed, 6 insertions(+), 8 deletions(-)

diff --git a/applications/text_summarization/unimo-text/export_model.py b/applications/text_summarization/unimo-text/export_model.py
index c9a79c465a14..4dc1a0b58fe5 100644
--- a/applications/text_summarization/unimo-text/export_model.py
+++ b/applications/text_summarization/unimo-text/export_model.py
@@ -11,16 +11,14 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-import os
 import argparse
+import os
+from pprint import pprint
 
 import paddle
-from pprint import pprint
-
-from paddlenlp.transformers import UNIMOLMHeadModel, UNIMOTokenizer
 from paddlenlp.ops import FasterUNIMOText
-
+from paddlenlp.transformers import UNIMOLMHeadModel, UNIMOTokenizer
 from paddlenlp.utils.log import logger
 
 
@@ -82,13 +80,13 @@ def do_predict(args):
         unimo_text,
         input_spec=[
             # input_ids
-            paddle.static.InputSpec(shape=[None, None], dtype="int32"),
+            paddle.static.InputSpec(shape=[None, None], dtype="int64"),
             # token_type_ids
-            paddle.static.InputSpec(shape=[None, None], dtype="int32"),
+            paddle.static.InputSpec(shape=[None, None], dtype="int64"),
             # attention_mask
             paddle.static.InputSpec(shape=[None, 1, None, None], dtype="float32"),
             # seq_len
-            paddle.static.InputSpec(shape=[None], dtype="int32"),
+            paddle.static.InputSpec(shape=[None], dtype="int64"),
             args.max_out_len,
             args.min_out_len,
             args.topk,
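
Note (illustrative addition, not part of the patch series): after patches 2 and 3, both
exported graphs declare int64 input_ids, token_type_ids, and seq_len, and a float32
attention_mask. The sketch below shows dummy inputs with matching dtypes fed to the
exported model through the Paddle Inference API; the model paths, the batch shape, and the
assumption that the graph exposes exactly these four inputs in this order are illustrative
only and should be checked against predictor.get_input_names().

    import numpy as np
    import paddle.inference as paddle_infer

    # Assumed paths; use whatever paddle.jit.save wrote in export_model.py.
    config = paddle_infer.Config("inference_model/unimo_text.pdmodel",
                                 "inference_model/unimo_text.pdiparams")
    predictor = paddle_infer.create_predictor(config)

    # Dummy batch of one sequence of length 8; dtypes must match the InputSpec declarations.
    feeds = [
        np.zeros([1, 8], dtype="int64"),          # input_ids
        np.zeros([1, 8], dtype="int64"),          # token_type_ids
        np.zeros([1, 1, 8, 8], dtype="float32"),  # attention_mask
        np.array([8], dtype="int64"),             # seq_len
    ]
    for name, value in zip(predictor.get_input_names(), feeds):
        predictor.get_input_handle(name).copy_from_cpu(value)
    predictor.run()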