
Commit 46d69fc

polish(xjy): delete unnecessary comments and translate CN comments into EN
1 parent 76611cf commit 46d69fc

3 files changed: +11 additions, −23 deletions


lzero/entry/eval_muzero.py

Lines changed: 0 additions & 6 deletions
@@ -60,13 +60,7 @@ def eval_muzero(
 
     # load pretrained model
     if model_path is not None:
-        logging.info(f"Loading pretrained model from {model_path}...")
         policy.learn_mode.load_state_dict(torch.load(model_path, map_location=cfg.policy.device))
-        logging.info("Pretrained model loaded successfully!")
-    else:
-        logging.warning("model_path is None!!!")
-
-    # print(policy._learn_model.representation_network.pretrained_model.encoder.layer[0].attention.output.LayerNorm.weight)
 
     # Create worker components: learner, collector, evaluator, replay buffer, commander.
     tb_logger = SummaryWriter(os.path.join('./{}/log/'.format(cfg.exp_name), 'serial'))
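After this change the checkpoint-loading branch reduces to the two surviving lines, with no logging around it. A minimal standalone sketch of the resulting flow (the wrapper function and its signature are hypothetical, only the `policy`/`cfg` attributes follow the diff context):

import torch

def load_pretrained_if_available(policy, cfg, model_path=None):
    # Hypothetical helper mirroring the simplified branch in eval_muzero:
    # load checkpoint weights into the learn-mode policy only when a path is given.
    if model_path is not None:
        state_dict = torch.load(model_path, map_location=cfg.policy.device)
        policy.learn_mode.load_state_dict(state_dict)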

lzero/model/common.py

Lines changed: 0 additions & 15 deletions
@@ -390,16 +390,6 @@ def __init__(self,
         # In distributed training, only the rank 0 process downloads the model, and other processes load from cache to speed up startup.
         if get_rank() == 0:
             self.pretrained_model = AutoModel.from_pretrained(model_path)
-
-            # layernorm_all_ones = True
-            # for name, param in self.pretrained_model.named_parameters():
-            #     if 'LayerNorm.weight' in name:
-            #         # Check whether all values are close to 1; torch.allclose can be used for this check:
-            #         if not torch.allclose(param, torch.ones_like(param), rtol=1e-05, atol=1e-08):
-            #             print(f"{name} is not all ones!")
-            #             layernorm_all_ones = False
-            # if layernorm_all_ones:
-            #     print("All LayerNorm.weight parameters are all ones.")
 
         if get_world_size() > 1:
             # Wait for rank 0 to finish loading the model.
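The hunk above keeps the rank-0-first download pattern for the pretrained encoder. A self-contained sketch of that pattern follows; the `dist.barrier()` synchronization and the wrapper function are assumptions, since the diff only shows `get_rank()`, `get_world_size()`, `AutoModel.from_pretrained`, and the "wait for rank 0" comment:

import torch.distributed as dist
from transformers import AutoModel

def load_pretrained_on_rank0_first(model_path: str):
    # Sketch of the rank-0-first loading pattern kept by the diff (assumed barrier,
    # not necessarily the primitive used in lzero/model/common.py).
    rank = dist.get_rank() if dist.is_initialized() else 0
    world_size = dist.get_world_size() if dist.is_initialized() else 1

    if rank == 0:
        # Rank 0 downloads (or reads) the weights and warms the local cache.
        model = AutoModel.from_pretrained(model_path)
    if world_size > 1:
        dist.barrier()  # wait for rank 0 to finish loading
    if rank != 0:
        # Other ranks now load from the already-populated cache.
        model = AutoModel.from_pretrained(model_path)
    return model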
@@ -422,8 +412,6 @@ def __init__(self,
         self.embedding_size = embedding_size
         self.embed_proj_head = nn.Linear(self.pretrained_model.config.hidden_size, self.embedding_size)
 
-        # self.sim_norm = SimNorm(simnorm_dim=group_size)
-
         # Select the normalization method based on the final_norm_option_in_encoder parameter.
         if final_norm_option_in_encoder.lower() == "simnorm":
             self.norm = SimNorm(simnorm_dim=group_size)
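With the dead `self.sim_norm` line gone, the encoder keeps a single `self.norm` chosen from `final_norm_option_in_encoder`. A reduced sketch of how the projection-plus-norm path fits together; the class name, the LayerNorm fallback, and the SimNorm import path are assumptions, only the `embed_proj_head` line and the `simnorm` branch appear in the diff:

import torch
import torch.nn as nn
from lzero.model.common import SimNorm  # assumed import path for SimNorm

class ProjectionWithFinalNorm(nn.Module):
    # Hypothetical, reduced module illustrating the pattern shown in the diff:
    # project the pretrained hidden state, then apply the configured final norm.
    def __init__(self, hidden_size: int, embedding_size: int,
                 final_norm_option_in_encoder: str = 'LayerNorm', group_size: int = 8):
        super().__init__()
        self.embed_proj_head = nn.Linear(hidden_size, embedding_size)
        if final_norm_option_in_encoder.lower() == "simnorm":
            self.norm = SimNorm(simnorm_dim=group_size)
        else:  # assumed fallback; the diff only shows the simnorm branch explicitly
            self.norm = nn.LayerNorm(embedding_size)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x = self.embed_proj_head(x)
        return self.norm(x)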
@@ -484,7 +472,6 @@ def __init__(
        norm_type: str = 'BN',
        embedding_dim: int = 256,
        group_size: int = 8,
-       # final_norm_option_in_encoder: str = 'SimNorm',
        final_norm_option_in_encoder: str = 'LayerNorm',  # TODO
     ) -> None:
        """
@@ -746,8 +733,6 @@ def forward(self, x: torch.Tensor) -> torch.Tensor:
             - output (:obj:`torch.Tensor`): :math:`(B, hidden_channels)`, where B is batch size.
         """
         x = self.fc_representation(x)
-        # TODO
-        # x = self.sim_norm(x)
         x = self.norm(x)
 
         return x

lzero/model/unizero_world_models/world_model.py

Lines changed: 11 additions & 2 deletions
@@ -84,7 +84,7 @@ def __init__(self, config: TransformerConfig, tokenizer) -> None:
         # Head modules
         self.head_rewards = self._create_head(self.act_tokens_pattern, self.support_size)
         self.head_observations = self._create_head(self.all_but_last_latent_state_pattern, self.obs_per_embdding_dim, \
-                                                   self._get_final_norm(self.final_norm_option_in_obs_head)  # 使用指定的归一化方法
+                                                   self._get_final_norm(self.final_norm_option_in_obs_head)  # using the specified normalization method
                                                    # self.sim_norm
                                                    )  # NOTE: we add a sim_norm to the head for observations
         if self.continuous_action_space:
@@ -158,7 +158,7 @@ def custom_init(module):
 
     def _get_final_norm(self, norm_option: str) -> nn.Module:
         """
-        根据指定的归一化选项返回相应的归一化模块。
+        Return the corresponding normalization module based on the specified normalization option.
         """
         if norm_option == 'LayerNorm':
             return nn.LayerNorm(self.config.embed_dim, eps=1e-5)
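`_get_final_norm` maps the configured option name to a normalization module for the observation head. A standalone sketch of the dispatch; only the 'LayerNorm' branch (with eps=1e-5) appears in this hunk, so the 'SimNorm' branch and the error handling are assumptions:

import torch.nn as nn
from lzero.model.common import SimNorm  # assumed import path

def get_final_norm(norm_option: str, embed_dim: int, group_size: int = 8) -> nn.Module:
    # Standalone version of the dispatch used by the observation head.
    if norm_option == 'LayerNorm':
        return nn.LayerNorm(embed_dim, eps=1e-5)
    elif norm_option == 'SimNorm':
        return SimNorm(simnorm_dim=group_size)
    raise ValueError(f"Unsupported norm option: {norm_option}")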
@@ -1299,6 +1299,15 @@ def compute_loss(self, batch, target_tokenizer: Tokenizer = None, inverse_scalar
         # Encode observations into latent state representations
         obs_embeddings = self.tokenizer.encode_to_obs_embeddings(batch['observations'])
 
+        # ========= for visual analysis =========
+        # Uncomment the lines below for visual analysis in Pong
+        # self.plot_latent_tsne_each_and_all_for_pong(obs_embeddings, suffix='pong_H10_H4_tsne')
+        # self.save_as_image_with_timestep(batch['observations'], suffix='pong_H10_H4_tsne')
+        # Uncomment the lines below for visual analysis in visual match
+        # self.plot_latent_tsne_each_and_all(obs_embeddings, suffix='visual_match_memlen1-60-15_tsne')
+        # self.save_as_image_with_timestep(batch['observations'], suffix='visual_match_memlen1-60-15_tsne')
+
+
         # ========= logging for analysis =========
         if self.analysis_dormant_ratio:
             # Calculate dormant ratio of the encoder
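The newly added (commented-out) hooks call repo-specific plotting helpers such as `plot_latent_tsne_each_and_all_for_pong`. A generic, self-contained sketch of projecting `obs_embeddings` to 2-D with t-SNE for the same kind of visual analysis; the sklearn/matplotlib usage here is an assumption, not what those helpers do internally:

import torch
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE

def plot_latent_tsne(obs_embeddings: torch.Tensor, suffix: str = 'tsne') -> None:
    # Flatten (B, T, E) or (N, E) embeddings to a 2-D array and project with t-SNE.
    emb = obs_embeddings.detach().float().cpu().reshape(-1, obs_embeddings.shape[-1]).numpy()
    # t-SNE requires perplexity < number of samples; clamp for small batches.
    perplexity = min(30, emb.shape[0] - 1)
    points = TSNE(n_components=2, perplexity=perplexity, init='pca').fit_transform(emb)
    plt.figure(figsize=(6, 6))
    plt.scatter(points[:, 0], points[:, 1], s=5)
    plt.title(f'latent t-SNE ({suffix})')
    plt.savefig(f'latent_tsne_{suffix}.png')
    plt.close()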
