2 changes: 1 addition & 1 deletion src/sharp/models/initializer.py
@@ -114,7 +114,7 @@ def __init__(
         self.feature_input_stop_grad = feature_input_stop_grad
 
     def prepare_feature_input(self, image: torch.Tensor, depth: torch.Tensor) -> torch.Tensor:
-        """Prepare the feature input to the Guassian predictor."""
+        """Prepare the feature input to the Gaussian predictor."""
         if self.feature_input_stop_grad:
             image = image.detach()
             depth = depth.detach()
2 changes: 1 addition & 1 deletion src/sharp/models/monodepth.py
@@ -77,7 +77,7 @@ def __init__(
             nn.ReLU(),
         )
 
-        # Set the final convoultion layer's bias to be 0.
+        # Set the final convolution layer's bias to be 0.
         self.head[4].bias.data.fill_(0)
 
         self.grad_checkpointing = False
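For readers skimming the hunk above: the commented line zero-fills the bias of the head's final convolution (index 4 of an nn.Sequential that, per the context lines, ends with an nn.ReLU()). The real layer sizes and layout are not part of this diff, so the following is only a hypothetical sketch of such a head; channel counts and kernel sizes are assumptions.

```python
import torch.nn as nn

# Hypothetical head layout: the diff only tells us that index 4 is the final
# convolution whose bias is zeroed and that the Sequential ends with a ReLU.
head = nn.Sequential(
    nn.Conv2d(64, 64, kernel_size=3, padding=1),  # index 0
    nn.ReLU(),                                    # index 1
    nn.Conv2d(64, 64, kernel_size=3, padding=1),  # index 2
    nn.ReLU(),                                    # index 3
    nn.Conv2d(64, 1, kernel_size=3, padding=1),   # index 4: final convolution
    nn.ReLU(),                                    # index 5: keeps the output non-negative
)

# Set the final convolution layer's bias to be 0, as the diff above documents.
head[4].bias.data.fill_(0)
```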
4 changes: 2 additions & 2 deletions src/sharp/models/predictor.py
@@ -140,7 +140,7 @@ def forward(
         # +------+-------+
         #        |
         # +-------+--------+ # Optionally align monodepth to ground truth
-        # |depth_alignement| # with a local scale map.
+        # |depth_alignment| # with a local scale map.
         # +-------+--------+
         #        |
         #        v
@@ -169,7 +169,7 @@ def forward(
         #
 
         # The logic to decide whether to align monodepth to the ground truth is wrapped
-        # in a submodule 'DepthAlignement' to facilitate the symbolic tracing of the
+        # in a submodule 'DepthAlignment' to facilitate the symbolic tracing of the
         # predictor. This way, the depth alignment submodule containing the conditional
         # logic can be excluded during the tracing and the graph of the predictors is
         # static.
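The comment corrected above also records a design choice: the data-dependent alignment step is isolated in a 'DepthAlignment' submodule so that symbolic tracing of the predictor (torch.fx style) can treat it as a leaf and the traced graph stays static. The repository's real classes are not shown in this diff, so the sketch below is only a minimal, hypothetical illustration of that pattern; the names DepthAlignment, Predictor, and LeafTracer and the median-scale heuristic are assumptions, and only the leaf-module mechanism is the point.

```python
# Hypothetical sketch (not the repository's code): keep the conditional logic
# in its own submodule and have a custom torch.fx Tracer treat it as a leaf,
# so the rest of the predictor traces into a static graph.
from typing import Optional

import torch
import torch.fx as fx
import torch.nn as nn


class DepthAlignment(nn.Module):
    """Optionally align monodepth to ground truth with a scale factor."""

    def forward(self, depth: torch.Tensor, gt_depth: Optional[torch.Tensor]) -> torch.Tensor:
        # Data-dependent branch: inlined in the predictor, this `if` would
        # break symbolic tracing because a Proxy has no concrete truth value.
        if gt_depth is None:
            return depth
        scale = (gt_depth / depth.clamp(min=1e-6)).median()
        return depth * scale


class Predictor(nn.Module):
    def __init__(self) -> None:
        super().__init__()
        self.depth_alignment = DepthAlignment()
        self.head = nn.Conv2d(1, 1, kernel_size=1)

    def forward(self, depth: torch.Tensor, gt_depth: Optional[torch.Tensor]) -> torch.Tensor:
        depth = self.depth_alignment(depth, gt_depth)
        return self.head(depth)


class LeafTracer(fx.Tracer):
    def is_leaf_module(self, m: nn.Module, module_qualified_name: str) -> bool:
        # Exclude DepthAlignment from tracing: it appears as a single
        # call_module node, and its conditional never enters the graph.
        return isinstance(m, DepthAlignment) or super().is_leaf_module(m, module_qualified_name)


graph = LeafTracer().trace(Predictor())
print(graph)  # static graph: call_module(depth_alignment) -> call_module(head)
```

Because the tracer never steps into DepthAlignment, the `if gt_depth is None` branch never sees a Proxy, which is exactly the failure mode that inlining the conditional in the predictor's forward would cause.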