本文共 1883 字,大约阅读时间需要 6 分钟。
for name, param in self.named_parameters():
    if name in ['bias']:
        print(param.size())
train(mode: bool = True) → T  # 正常使用Batch Normalization和Dropout
eval() → T  # 不会使用Batch Normalization和Dropout
torch.nn.Module
add_module(name: str, module: torch.nn.modules.module.Module) → None
apply(fn: Callable[Module, None]) → T
bfloat16() → T
buffers(recurse: bool = True) → Iterator[torch.Tensor]
children() → Iterator[torch.nn.modules.module.Module]
cpu() → T
cuda(device: Union[int, torch.device, None] = None) → T
double() → T
dump_patches: bool = False
eval() → T
extra_repr() → str
float() → T
half() → T
load_state_dict(state_dict: Dict[str, torch.Tensor], strict: bool = True)
modules() → Iterator[torch.nn.modules.module.Module]
named_buffers(prefix: str = '', recurse: bool = True) → Iterator[Tuple[str, torch.Tensor]]
named_children() → Iterator[Tuple[str, torch.nn.modules.module.Module]]
named_modules(memo: Optional[Set[Module]] = None, prefix: str = '')
named_parameters(prefix: str = '', recurse: bool = True) → Iterator[Tuple[str, torch.Tensor]]
parameters(recurse: bool = True) → Iterator[torch.nn.parameter.Parameter]
register_backward_hook(hook: Callable[[Module, Union[Tuple[torch.Tensor, ...], torch.Tensor], Union[Tuple[torch.Tensor, ...], torch.Tensor]], Union[None, torch.Tensor]]) → torch.utils.hooks.RemovableHandle
register_buffer(name: str, tensor: torch.Tensor, persistent: bool = True) → None
register_forward_hook(hook: Callable[..., None]) → torch.utils.hooks.RemovableHandle
register_forward_pre_hook(hook: Callable[..., None]) → torch.utils.hooks.RemovableHandle
register_parameter(name: str, param: torch.nn.parameter.Parameter) → None
requires_grad_(requires_grad: bool = True) → T
state_dict(destination=None, prefix='', keep_vars=False)
to(*args, **kwargs)
train(mode: bool = True) → T
type(dst_type: Union[torch.dtype, str]) → T
zero_grad() → None
转载地址:http://ozrfk.baihongyu.com/