
Commit ee88c12

use tensor.shape but not paddle.shape(tensor) (#8260)

* use tensor.shape but not paddle.shape(tensor)
* refine
* refine
1 parent f658fa7 commit ee88c12

File tree: 68 files changed (+292, -326 lines)

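Every hunk below makes the same substitution: shapes are read through the Tensor's own shape attribute instead of the paddle.shape() op. As a quick orientation (this snippet is illustrative only and is not part of the commit), the two forms behave differently in dynamic-graph mode: Tensor.shape gives a Python list of ints, while paddle.shape() returns an integer Tensor.

# Illustrative sketch, not repo code: comparing the two ways of reading a shape.
import paddle

x = paddle.randn([2, 3, 4])

py_shape = x.shape            # Python list of ints in dygraph: [2, 3, 4]
last_dim = x.shape[-1]        # plain int, usable directly in Python arithmetic

shape_tensor = paddle.shape(x)      # integer Tensor holding [2, 3, 4]
last_dim_t = paddle.shape(x)[-1]    # still a Tensor, not a Python int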

examples/language_model/moe/dygraph/modeling.py

Lines changed: 2 additions & 2 deletions
@@ -748,8 +748,8 @@ def forward(self, input_ids, position_ids=None, attention_mask=None, use_cache=F
         if position_ids is None:
             past_length = 0
             if cache is not None:
-                past_length = paddle.shape(cache[0].k)[-2]
-            position_ids = paddle.arange(past_length, paddle.shape(input_ids)[-1] + past_length, dtype="int64")
+                past_length = cache[0].k.shape[-2]
+            position_ids = paddle.arange(past_length, input_ids.shape[-1] + past_length, dtype="int64")
             position_ids = position_ids.unsqueeze(0)
             # .expand_as(input_ids)
             position_ids = paddle.expand_as(position_ids, input_ids)
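A minimal, self-contained sketch of the pattern above, using made-up tensor sizes rather than the repo's GPT module: the cached key length becomes a Python int via .shape, and the new position ids continue from it.

# Hypothetical sizes for illustration; cached_k stands in for cache[0].k.
import paddle

input_ids = paddle.to_tensor([[11, 12, 13]])      # current step tokens, shape (1, 3)
cached_k = paddle.zeros([1, 16, 5, 64])           # (batch, heads, past_len, head_dim)

past_length = cached_k.shape[-2]                  # 5, a Python int in dygraph
position_ids = paddle.arange(past_length, input_ids.shape[-1] + past_length, dtype="int64")
position_ids = position_ids.unsqueeze(0)          # positions [[5, 6, 7]]
position_ids = paddle.expand_as(position_ids, input_ids)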

examples/model_interpretation/task/senti/rnn/model.py

Lines changed: 5 additions & 7 deletions
@@ -207,7 +207,7 @@ def forward(self, input, mask=None):
         # Shape: (batch_size, max_seq_len, hidden_size)
         h = paddle.add_n([forward_input, backward_input])
         # Shape: (batch_size, hidden_size, 1)
-        att_weight = self.att_weight.tile(repeat_times=(paddle.shape(h)[0], 1, 1))
+        att_weight = self.att_weight.tile(repeat_times=(h.shape[0], 1, 1))
         # Shape: (batch_size, max_seq_len, 1)
         att_score = paddle.bmm(paddle.tanh(h), att_weight)
         if mask is not None:
@@ -246,20 +246,18 @@ def forward(self, input, mask=None):
                 Tensor is a bool tensor, whose each element identifies whether the input word id is pad token or not.
                 Defaults to `None
         """
-        weight = self.input_weight.tile(
-            repeat_times=(paddle.shape(input)[0], 1, 1)
-        )  # tensor[batch, hidden_size, hidden_size]
-        bias = self.bias.tile(repeat_times=(paddle.shape(input)[0], 1, 1))  # tensor[batch, 1, hidden_size]
+        weight = self.input_weight.tile(repeat_times=(input.shape[0], 1, 1))  # tensor[batch, hidden_size, hidden_size]
+        bias = self.bias.tile(repeat_times=(input.shape[0], 1, 1))  # tensor[batch, 1, hidden_size]
         word_squish = paddle.bmm(input, weight) + bias  # Shape: (batch_size, seq_len, hidden_size)
         att_context_vector = self.att_context_vector.tile(
-            repeat_times=(paddle.shape(input)[0], 1, 1)
+            repeat_times=(input.shape[0], 1, 1)
         )  # Shape: (batch_size, hidden_size, 1)
         att_score = paddle.bmm(word_squish, att_context_vector)  # tensor[batch_size, seq_len, 1]
         if mask is not None:
             # mask, remove the effect of 'PAD'
             mask = paddle.cast(mask, dtype="float32")
             mask = mask.unsqueeze(axis=-1)
-            inf_tensor = paddle.full(shape=paddle.shape(mask), dtype="float32", fill_value=-INF)
+            inf_tensor = paddle.full(shape=mask.shape, dtype="float32", fill_value=-INF)
             att_score = paddle.multiply(att_score, mask) + paddle.multiply(inf_tensor, (1 - mask))
         att_weight = F.softmax(att_score, axis=1)  # tensor[batch_size, seq_len, 1]
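The same masked-attention pattern appears again in examples/text_classification/rnn/model.py below. A standalone sketch with arbitrary sizes and random tensors standing in for the layer's parameters: the batch size read from h.shape[0] drives tile(), and PAD positions are pushed toward -INF before the softmax.

# Illustrative only; random tensors replace the model's learnable parameters.
import paddle
import paddle.nn.functional as F

INF = 1e6  # assumed constant; the repo defines its own INF

batch_size, seq_len, hidden_size = 4, 10, 32
h = paddle.randn([batch_size, seq_len, hidden_size])     # summed BiLSTM states
mask = paddle.ones([batch_size, seq_len])                # 1 = real token, 0 = PAD
att_weight_param = paddle.randn([1, hidden_size, 1])     # stands in for self.att_weight

att_weight = att_weight_param.tile(repeat_times=(h.shape[0], 1, 1))   # (batch, hidden, 1)
att_score = paddle.bmm(paddle.tanh(h), att_weight)                    # (batch, seq_len, 1)

mask = paddle.cast(mask, dtype="float32").unsqueeze(axis=-1)          # (batch, seq_len, 1)
inf_tensor = paddle.full(shape=mask.shape, dtype="float32", fill_value=-INF)
att_score = paddle.multiply(att_score, mask) + paddle.multiply(inf_tensor, (1 - mask))
att_probs = F.softmax(att_score, axis=1)                              # attention over seq_len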

examples/simultaneous_translation/stacl/demo/model_demo.py

Lines changed: 1 addition & 1 deletion
@@ -34,7 +34,7 @@ def greedy_search(self, src_word, max_len=256, waitk=-1, caches=None, bos_id=Non
         So, it needsprevious state(caches) and last one of generated
         tokens id last time.
         """
-        src_max_len = paddle.shape(src_word)[-1]
+        src_max_len = src_word.shape[-1]
         base_attn_bias = (
             paddle.cast(src_word == self.bos_id, dtype=paddle.get_default_dtype()).unsqueeze([1, 2]) * -1e9
         )

examples/simultaneous_translation/stacl/model.py

Lines changed: 5 additions & 5 deletions
@@ -15,11 +15,11 @@
 from __future__ import print_function
 
 import numpy as np
-
 import paddle
 import paddle.nn as nn
 import paddle.nn.functional as F
-from paddlenlp.transformers import WordEmbedding, PositionalEmbedding
+
+from paddlenlp.transformers import PositionalEmbedding, WordEmbedding
 
 
 class CrossEntropyCriterion(nn.Layer):
@@ -190,8 +190,8 @@ def __init__(
         self.linear = nn.Linear(in_features=d_model, out_features=trg_vocab_size, bias_attr=False)
 
     def forward(self, src_word, trg_word):
-        src_max_len = paddle.shape(src_word)[-1]
-        trg_max_len = paddle.shape(trg_word)[-1]
+        src_max_len = src_word.shape[-1]
+        trg_max_len = trg_word.shape[-1]
         base_attn_bias = (
             paddle.cast(src_word == self.bos_id, dtype=paddle.get_default_dtype()).unsqueeze([1, 2]) * -1e9
         )
@@ -236,7 +236,7 @@ def beam_search(self, src_word, beam_size=4, max_len=256, waitk=-1):
         raise NotImplementedError
 
     def greedy_search(self, src_word, max_len=256, waitk=-1):
-        src_max_len = paddle.shape(src_word)[-1]
+        src_max_len = src_word.shape[-1]
         base_attn_bias = (
             paddle.cast(src_word == self.bos_id, dtype=paddle.get_default_dtype()).unsqueeze([1, 2]) * -1e9
         )

examples/text_classification/rnn/model.py

Lines changed: 5 additions & 5 deletions
@@ -253,7 +253,7 @@ def forward(self, input, mask=None):
         # Shape: (batch_size, max_seq_len, hidden_size)
         h = paddle.add_n([forward_input, backward_input])
         # Shape: (batch_size, hidden_size, 1)
-        att_weight = self.att_weight.tile(repeat_times=(paddle.shape(h)[0], 1, 1))
+        att_weight = self.att_weight.tile(repeat_times=(h.shape[0], 1, 1))
         # Shape: (batch_size, max_seq_len, 1)
         att_score = paddle.bmm(paddle.tanh(h), att_weight)
         if mask is not None:
@@ -292,19 +292,19 @@ def forward(self, input, mask=None):
                 Tensor is a bool tensor, whose each element identifies whether the input word id is pad token or not.
                 Defaults to `None
         """
-        weight = self.input_weight.tile(repeat_times=(paddle.shape(input)[0], 1, 1))
-        bias = self.bias.tile(repeat_times=(paddle.shape(input)[0], 1, 1))
+        weight = self.input_weight.tile(repeat_times=(input.shape[0], 1, 1))
+        bias = self.bias.tile(repeat_times=(input.shape[0], 1, 1))
         # Shape: (batch_size, max_seq_len, hidden_size)
         word_squish = paddle.bmm(input, weight) + bias
 
-        att_context_vector = self.att_context_vector.tile(repeat_times=(paddle.shape(input)[0], 1, 1))
+        att_context_vector = self.att_context_vector.tile(repeat_times=(input.shape[0], 1, 1))
         # Shape: (batch_size, max_seq_len, 1)
         att_score = paddle.bmm(word_squish, att_context_vector)
         if mask is not None:
             # mask, remove the effect of 'PAD'
             mask = paddle.cast(mask, dtype="float32")
             mask = mask.unsqueeze(axis=-1)
-            inf_tensor = paddle.full(shape=paddle.shape(mask), dtype="float32", fill_value=-INF)
+            inf_tensor = paddle.full(shape=mask.shape, dtype="float32", fill_value=-INF)
             att_score = paddle.multiply(att_score, mask) + paddle.multiply(inf_tensor, (1 - mask))
         att_weight = F.softmax(att_score, axis=1)

examples/text_to_sql/RAT-SQL/text2sql/utils/nn_utils.py

Lines changed: 1 addition & 1 deletion
@@ -74,7 +74,7 @@ def batch_gather_2d(var, indices):
             "shape of indices error. it should be a 2-D layers. " "but got shape = %s" % (str(indices.shape),)
         )
 
-    batch_size = paddle.shape(indices)[0]
+    batch_size = indices.shape[0]
 
     zero = paddle.to_tensor([0], dtype="int64")
     one = paddle.to_tensor([1], dtype="int64")

llm/ernie-3.5-se/modeling.py

Lines changed: 2 additions & 2 deletions
@@ -142,7 +142,7 @@ def scaled_dot_product_attention(
     query_states, key_states, value_states, attention_mask, output_attentions, config, is_causal=True
 ):
 
-    bsz, q_len, num_heads, _ = paddle.shape(query_states)
+    bsz, q_len, num_heads, _ = query_states.shape
     head_dim = config.hidden_size // config.num_attention_heads
     _, kv_seq_len, _, _ = value_states.shape
 
@@ -1054,7 +1054,7 @@ def forward(
         seq_length_with_past = seq_length
         cache_length = 0
         if past_key_values[0] is not None:
-            cache_length = paddle.shape(past_key_values[0][0])[1]
+            cache_length = past_key_values[0][0].shape[1]
             seq_length_with_past += cache_length
         if inputs_embeds is None:
             inputs_embeds = self.embed_tokens(input_ids).astype(self.embed_tokens.weight.dtype)
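A side note on why the tuple unpacking above still works after the change: in dynamic-graph mode Tensor.shape is an ordinary Python list, so the four values arrive as ints, whereas paddle.shape() would hand back a single integer Tensor. A tiny check with assumed dimensions:

# Assumed dimensions, for illustration only.
import paddle

query_states = paddle.randn([2, 16, 8, 64])           # (bsz, q_len, num_heads, head_dim)

bsz, q_len, num_heads, head_dim = query_states.shape  # four Python ints
assert isinstance(bsz, int)

shape_tensor = paddle.shape(query_states)             # an int Tensor holding [2, 16, 8, 64]
assert isinstance(shape_tensor, paddle.Tensor)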

model_zoo/gpt-3/ppfleetx/models/language_model/gpt/auto/auto_model.py

Lines changed: 8 additions & 8 deletions
@@ -735,8 +735,8 @@ def forward(self, input_ids, position_ids=None, attention_mask=None, use_cache=F
         if position_ids is None:
             past_length = 0
             if cache is not None:
-                past_length = paddle.shape(attention_mask)[-1] - 1
-            position_ids = paddle.arange(past_length, paddle.shape(input_ids)[-1] + past_length, dtype=input_ids.dtype)
+                past_length = attention_mask.shape[-1] - 1
+            position_ids = paddle.arange(past_length, input_ids.shape[-1] + past_length, dtype=input_ids.dtype)
             position_ids = position_ids.unsqueeze(0)
             position_ids = paddle.expand_as(position_ids, input_ids)
 
@@ -753,7 +753,7 @@ def forward(self, input_ids, position_ids=None, attention_mask=None, use_cache=F
         if not self.fused_softmax_with_triangular or not paddle.is_compiled_with_cuda():
             # TODO, use registered buffer
             causal_mask = paddle.tensor.triu(
-                paddle.ones((paddle.shape(input_ids)[-1], paddle.shape(input_ids)[-1])) * -1e4, diagonal=1
+                paddle.ones((input_ids.shape[-1], input_ids.shape[-1])) * -1e4, diagonal=1
             )
             if attention_mask is not None:
                 if len(attention_mask.shape) == 2:
@@ -972,7 +972,7 @@ def get_logits_processor(
 
     def expand_inputs_for_generation(self, input_ids, expand_size, attention_mask=None, **model_kwargs):
 
-        index = paddle.tile(paddle.arange(paddle.shape(input_ids)[0]).unsqueeze(-1), [1, expand_size]).reshape([-1])
+        index = paddle.tile(paddle.arange(input_ids.shape[0]).unsqueeze(-1), [1, expand_size]).reshape([-1])
 
         input_ids = paddle.gather(input_ids, index)
 
@@ -1109,11 +1109,11 @@ def TopPProcess(probs, top_p, min_tokens_to_keep):
             probs = paddle.where(condition, paddle.full_like(probs, 0.0), probs)
             return probs
 
-        batch_size, cur_len = paddle.shape(input_ids)
+        batch_size, cur_len = input_ids.shape
         # used for compute on gpu, avoid memcpy D2H
        cur_len_gpu = paddle.full([1], cur_len, dtype="int64")
 
-        origin_len = paddle.shape(input_ids)[1]
+        origin_len = input_ids.shape[1]
         # used for compute on gpu, avoid memcpy D2H
         origin_len_gpu = paddle.full([1], origin_len, dtype="int64")
 
@@ -1167,7 +1167,7 @@ def _post_process_(outputs, input_ids, cur_len, origin_len, scores, unfinished_f
                     raise ImportError(
                         "please install ppfleetx_ops by 'cd ppfleetx/ops && python setup_cuda.py install'!"
                     )
-                top_ps_tensor = paddle.full(shape=[paddle.shape(probs)[0]], fill_value=top_p, dtype=probs.dtype)
+                top_ps_tensor = paddle.full(shape=[probs.shape[0]], fill_value=top_p, dtype=probs.dtype)
                 # TODO fake random seed here
                 # Users should set the random seed dynamically when inference
                 _, next_tokens = topp_sampling(probs, top_ps_tensor, random_seed=100)
@@ -1299,7 +1299,7 @@ def forward(self, input_ids=None, **model_kwargs):
 
         if model_kwargs.get("position_ids", None) is None:
             model_kwargs["position_ids"] = paddle.arange(
-                0, paddle.shape(model_kwargs["attention_mask"])[-1], dtype=input_ids.dtype
+                0, model_kwargs["attention_mask"].shape[-1], dtype=input_ids.dtype
             ).unsqueeze(0)
 
         self.is_encoder_decoder = False
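For context on the causal-mask hunks, a small illustrative sketch (arbitrary vocabulary size and sequence length, not the ppfleetx module) of the additive mask built above once seq_len is a plain int:

# Arbitrary sizes for illustration; 50304 is just a stand-in vocab size.
import paddle

input_ids = paddle.randint(0, 50304, shape=[1, 6])    # (batch, seq_len)
seq_len = input_ids.shape[-1]                         # plain int in dygraph

causal_mask = paddle.tensor.triu(
    paddle.ones((seq_len, seq_len)) * -1e4, diagonal=1
)
# causal_mask[i, j] is -1e4 for j > i (future positions) and 0 elsewhere,
# so adding it to the attention logits suppresses future tokens in the softmax.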

model_zoo/gpt-3/ppfleetx/models/language_model/gpt/dygraph/hybrid_model.py

Lines changed: 4 additions & 4 deletions
@@ -837,8 +837,8 @@ def forward(self, input_ids, position_ids=None, attention_mask=None, use_cache=F
         if position_ids is None:
             past_length = 0
             if cache is not None:
-                past_length = paddle.shape(attention_mask)[-1] - 1
-            position_ids = paddle.arange(past_length, paddle.shape(input_ids)[-1] + past_length, dtype=input_ids.dtype)
+                past_length = attention_mask.shape[-1] - 1
+            position_ids = paddle.arange(past_length, input_ids.shape[-1] + past_length, dtype=input_ids.dtype)
             position_ids = position_ids.unsqueeze(0)
             # .expand_as(input_ids)
             position_ids = paddle.expand_as(position_ids, input_ids)
@@ -851,7 +851,7 @@ def forward(self, input_ids, position_ids=None, attention_mask=None, use_cache=F
         if not self.fused_softmax_with_triangular or not paddle.is_compiled_with_cuda():
             # TODO, use registered buffer
             causal_mask = paddle.tensor.triu(
-                paddle.ones((paddle.shape(input_ids)[-1], paddle.shape(input_ids)[-1])) * -1e4, diagonal=1
+                paddle.ones((input_ids.shape[-1], input_ids.shape[-1])) * -1e4, diagonal=1
             )
             if attention_mask is not None:
                 if len(attention_mask.shape) == 2:
@@ -1304,7 +1304,7 @@ def get_logits_processor(
 
     def expand_inputs_for_generation(self, input_ids, expand_size, attention_mask=None, **model_kwargs):
 
-        index = paddle.tile(paddle.arange(paddle.shape(input_ids)[0]).unsqueeze(-1), [1, expand_size]).reshape([-1])
+        index = paddle.tile(paddle.arange(input_ids.shape[0]).unsqueeze(-1), [1, expand_size]).reshape([-1])
 
         input_ids = paddle.gather(input_ids, index)

model_zoo/gpt-3/ppfleetx/models/language_model/gpt/dygraph/single_model.py

Lines changed: 6 additions & 6 deletions
@@ -602,8 +602,8 @@ def forward(self, input_ids, position_ids=None, attention_mask=None, use_cache=F
         if position_ids is None:
             past_length = 0
             if cache is not None:
-                past_length = paddle.shape(attention_mask)[-1] - 1
-            position_ids = paddle.arange(past_length, paddle.shape(input_ids)[-1] + past_length, dtype=input_ids.dtype)
+                past_length = attention_mask.shape[-1] - 1
+            position_ids = paddle.arange(past_length, input_ids.shape[-1] + past_length, dtype=input_ids.dtype)
             position_ids = position_ids.unsqueeze(0)
             # .expand_as(input_ids)
             position_ids = paddle.expand_as(position_ids, input_ids)
@@ -615,7 +615,7 @@ def forward(self, input_ids, position_ids=None, attention_mask=None, use_cache=F
         if not self.fused_softmax_with_triangular or not paddle.is_compiled_with_cuda():
             # TODO, use registered buffer
             causal_mask = paddle.tensor.triu(
-                paddle.ones((paddle.shape(input_ids)[-1], paddle.shape(input_ids)[-1])) * -1e4, diagonal=1
+                paddle.ones((input_ids.shape[-1], input_ids.shape[-1])) * -1e4, diagonal=1
             )
             if attention_mask is not None:
                 if len(attention_mask.shape) == 2:
@@ -848,7 +848,7 @@ def get_logits_processor(
 
     def expand_inputs_for_generation(self, input_ids, expand_size, attention_mask=None, **model_kwargs):
 
-        index = paddle.tile(paddle.arange(paddle.shape(input_ids)[0]).unsqueeze(-1), [1, expand_size]).reshape([-1])
+        index = paddle.tile(paddle.arange(input_ids.shape[0]).unsqueeze(-1), [1, expand_size]).reshape([-1])
 
         input_ids = paddle.gather(input_ids, index)
 
@@ -1039,7 +1039,7 @@ def _post_process_(outputs, input_ids, cur_len, origin_len, scores, unfinished_f
                     raise ImportError(
                         "please install ppfleetx_ops by 'cd ppfleetx/ops && python setup_cuda.py install'!"
                     )
-                top_ps_tensor = paddle.full(shape=[paddle.shape(probs)[0]], fill_value=top_p, dtype=probs.dtype)
+                top_ps_tensor = paddle.full(shape=[probs.shape[0]], fill_value=top_p, dtype=probs.dtype)
                 _, next_tokens = topp_sampling(probs, top_ps_tensor, random_seed=100)
             else:
                 probs = TopPProcess(probs, top_p, min_tokens_to_keep)
@@ -1194,7 +1194,7 @@ def forward(self, input_ids=None, **model_kwargs):
 
         if model_kwargs.get("position_ids", None) is None:
             model_kwargs["position_ids"] = paddle.arange(
-                0, paddle.shape(model_kwargs["attention_mask"])[-1], dtype=input_ids.dtype
+                0, model_kwargs["attention_mask"].shape[-1], dtype=input_ids.dtype
             ).unsqueeze(0)
 
         self.is_encoder_decoder = False

paddlenlp/generation/utils.py

Lines changed: 7 additions & 7 deletions
@@ -412,9 +412,9 @@ def get_logits_processor(
     @staticmethod
     def expand_inputs_for_generation(input_ids, expand_size, attention_mask=None, **model_kwargs):
 
-        index = paddle.tile(
-            paddle.arange(paddle.shape(input_ids)[0], dtype="int64").unsqueeze(-1), [1, expand_size]
-        ).reshape([-1])
+        index = paddle.tile(paddle.arange(input_ids.shape[0], dtype="int64").unsqueeze(-1), [1, expand_size]).reshape(
+            [-1]
+        )
 
         input_ids = paddle.gather(input_ids, index)
 
@@ -1340,11 +1340,11 @@ def sample_d2s(
                 "you should not specify InputSpec for top_k and top_p parameters, one of InputSpec is expected"
             )
 
-        batch_size, cur_len = paddle.shape(input_ids)
+        batch_size, cur_len = input_ids.shape
         # used for compute on gpu, avoid memcpy D2H
         cur_len_gpu = paddle.full([1], cur_len, dtype="int64")
 
-        origin_len = paddle.shape(input_ids)[1]
+        origin_len = input_ids.shape[1]
         # used for compute on gpu, avoid memcpy D2H
         origin_len_gpu = paddle.full([1], origin_len, dtype="int64")
 
@@ -1384,7 +1384,7 @@ def _post_process_(outputs, input_ids, cur_len, origin_len, scores, unfinished_f
             # compute next_tokens
             if use_top_p:
                 logits = logits / temperature
-                top_ps_tensor = paddle.full(shape=[paddle.shape(probs)[0], 1], fill_value=top_p, dtype=probs.dtype)
+                top_ps_tensor = paddle.full(shape=[probs.shape[0], 1], fill_value=top_p, dtype=probs.dtype)
                 _, next_tokens = paddle.tensor.top_p_sampling(probs, top_ps_tensor)
             else:
                 probs = TopKProcess(probs, top_k, min_tokens_to_keep)
@@ -1428,7 +1428,7 @@ def _post_process_(outputs, input_ids, cur_len, origin_len, scores, unfinished_f
 
         attn_mask = model_kwargs["attention_mask"]
         # make the shape of attention_mask = (-1, -1, -1, -1) in dy2static.
-        model_kwargs["attention_mask"] = paddle.reshape(attn_mask, paddle.shape(attn_mask))
+        model_kwargs["attention_mask"] = paddle.reshape(attn_mask, attn_mask.shape)
         model_kwargs["cache"] = outputs[1] if isinstance(outputs, tuple) else None
         max_new_tokens = paddle.full([1], max_new_tokens + cur_len - 1, dtype="int64")

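The expand_inputs_for_generation change above keeps the same index-expansion trick, just with a Python int feeding paddle.arange. A worked example with a toy batch (illustrative, not library code): each input row is repeated expand_size times so that sampling or beam search sees that many copies per example.

# Toy batch for illustration.
import paddle

input_ids = paddle.to_tensor([[1, 2, 3],
                              [4, 5, 6]])     # (batch_size=2, seq_len=3)
expand_size = 2

index = paddle.tile(
    paddle.arange(input_ids.shape[0], dtype="int64").unsqueeze(-1), [1, expand_size]
).reshape([-1])                                # Tensor([0, 0, 1, 1])

expanded = paddle.gather(input_ids, index)     # (4, 3): rows 0, 0, 1, 1
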
paddlenlp/layers/crf.py

Lines changed: 4 additions & 4 deletions
@@ -303,7 +303,7 @@ def __init__(self, transitions, with_start_stop_tag=True):
         if with_start_stop_tag:
             self.start_idx = -1
             self.stop_idx = -2
-        self.num_tags = paddle.shape(transitions)[0]
+        self.num_tags = transitions.shape[0]
 
         self._initial_alpha = None
         self._index = None
@@ -312,7 +312,7 @@ def __init__(self, transitions, with_start_stop_tag=True):
 
     def _initialize_alpha(self, batch_size):
         # alpha accumulate the path value to get the different next tag
-        if self._initial_alpha is None or batch_size > paddle.shape(self._initial_alpha)[0]:
+        if self._initial_alpha is None or batch_size > self._initial_alpha.shape[0]:
             # Initialized by a small value.
             initial_alpha = paddle.full([batch_size, self.num_tags - 1], dtype="float32", fill_value=-10000.0)
             # alpha_start fill_value = 0. > -10000., means the first one step START gets the most score.
@@ -336,7 +336,7 @@ def forward(self, inputs, lengths):
             The `paths` tensor containing the highest scoring tag indices.
             Its dtype is int64 and has a shape of `[batch_size, sequence_length]`.
         """
-        input_shape = paddle.shape(inputs)
+        input_shape = inputs.shape
         batch_size = input_shape[0]
         n_label = input_shape[2]
 
@@ -412,6 +412,6 @@ def forward(self, inputs, lengths):
         return scores, batch_path
 
     def _get_batch_index(self, batch_size):
-        if self._batch_index is None or batch_size != paddle.shape(self._batch_index)[0]:
+        if self._batch_index is None or batch_size != self._batch_index.shape[0]:
             self._batch_index = paddle.arange(end=batch_size, dtype="int64")
         return self._batch_index
