diff --git a/example.py b/example.py
new file mode 100644
index 0000000..b71528e
--- /dev/null
+++ b/example.py
@@ -0,0 +1,55 @@
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+from trace_commentor import Commentor
+
+
+class Net(nn.Module):
+
+    def __init__(self):
+        super(Net, self).__init__()
+        # 1 input image channel, 6 output channels, 5x5 square convolution
+        # kernel
+        self.conv1 = nn.Conv2d(1, 6, 5)
+        self.conv2 = nn.Conv2d(6, 16, 5)
+        # an affine operation: y = Wx + b
+        self.fc1 = nn.Linear(16 * 5 * 5, 120)  # 5*5 from image dimension
+        self.fc2 = nn.Linear(120, 84)
+        self.fc3 = nn.Linear(84, 10)
+
+    @Commentor()
+    def forward(self, input):
+        # Convolution layer C1: 1 input image channel, 6 output channels,
+        # 5x5 square convolution; it uses the ReLU activation function and
+        # outputs a (N, 6, 28, 28) Tensor, where N is the batch size
+        c1 = F.relu(self.conv1(input))
+        # Subsampling layer S2: 2x2 grid, purely functional; this layer has
+        # no parameters and outputs a (N, 6, 14, 14) Tensor
+        s2 = F.max_pool2d(c1, (2, 2))
+        # Convolution layer C3: 6 input channels, 16 output channels,
+        # 5x5 square convolution; it uses the ReLU activation function and
+        # outputs a (N, 16, 10, 10) Tensor
+        c3 = F.relu(self.conv2(s2))
+        # Subsampling layer S4: 2x2 grid, purely functional; this layer has
+        # no parameters and outputs a (N, 16, 5, 5) Tensor
+        s4 = F.max_pool2d(c3, 2)
+        # Flatten operation: purely functional, outputs a (N, 400) Tensor
+        s4 = torch.flatten(s4, 1)
+        # Fully connected layer F5: takes a (N, 400) Tensor as input and
+        # outputs a (N, 120) Tensor; it uses the ReLU activation function
+        f5 = F.relu(self.fc1(s4))
+        # Fully connected layer F6: takes a (N, 120) Tensor as input and
+        # outputs a (N, 84) Tensor; it uses the ReLU activation function
+        f6 = F.relu(self.fc2(f5))
+        # Gaussian layer OUTPUT: takes a (N, 84) Tensor as input and
+        # outputs a (N, 10) Tensor
+        output = self.fc3(f6)
+        return output
+
+
+net = Net()
+
+input = torch.randn(1, 1, 32, 32)
+out = net(input)
+print(out)
diff --git a/experiment.py b/experiment.py
deleted file mode 100644
index f9ca756..0000000
--- a/experiment.py
+++ /dev/null
@@ -1,23 +0,0 @@
-import torch
-import torch.nn as nn
-from tests.test_utils import *
-
-def test():
-
-    @Commentor(_globals=globals())
-    def target():
-        x = torch.ones(4, 5)
-        for i in range(3):
-            x = x[..., None, :]
-
-        a = torch.randn(309, 110, 3)[:100]
-        f = nn.Linear(3, 128)
-        b = f(a.reshape(-1, 3)).reshape(-1, 110, 128)
-        c = torch.concat((a, b), dim=-1)
-
-        return c.flatten()
-
-    target()
-
-
-test()
diff --git a/tests/test_torch.py b/tests/test_torch.py
index 0fd971d..26eebaf 100644
--- a/tests/test_torch.py
+++ b/tests/test_torch.py
@@ -6,7 +6,7 @@ from test_utils import *
 
 def test_torch():
 
-    @Commentor("", _globals=globals())
+    @Commentor("")
     def target():
         x = torch.ones(4, 5)
 
diff --git a/trace_commentor/commentor.py b/trace_commentor/commentor.py
index 7601dc2..497d434 100644
--- a/trace_commentor/commentor.py
+++ b/trace_commentor/commentor.py
@@ -1,7 +1,5 @@
 import ast
 import inspect
-import sys
-import rich
 
 from inspect import getfullargspec
 from functools import wraps
@@ -14,9 +12,9 @@ from .utils import sign, to_source, comment_to_file
 
 class Commentor(object):
 
-    def __init__(self, output="", _globals=dict(), fmt=[], check=True, _exit=True) -> None:
+    def __init__(self, output="", fmt=[], check=True, _exit=True) -> None:
         self._locals = dict()
-        self._globals = _globals
+        self._globals = inspect.stack()[1][0].f_globals
         self._return = None
         self._formatters = fmt + formatters.LIST
         self._lines = []
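
Note on the `_globals` change above: `Commentor.__init__` now captures the caller's module namespace via `inspect.stack()[1][0].f_globals` instead of requiring every call site to pass `_globals=globals()`, which is why the `example.py` and `tests/test_torch.py` decorators no longer carry that argument. A minimal sketch of the frame-inspection pattern is below; the `CallerGlobals` class and `ANSWER` constant are illustrative names for this sketch, not part of the repository.

```python
import inspect


class CallerGlobals:
    """Sketch of the pattern now used in Commentor.__init__."""

    def __init__(self):
        # inspect.stack()[1] is the caller's FrameInfo record; element 0
        # is its frame object, and f_globals is that frame's module
        # namespace, so the caller no longer has to pass globals() in.
        self._globals = inspect.stack()[1][0].f_globals


ANSWER = 42
cg = CallerGlobals()
assert cg._globals["ANSWER"] == 42  # resolved without passing globals()
```

One consequence of this design: the globals are captured where `Commentor(...)` is instantiated, so the decorator must be applied in the same module whose names it is expected to resolve.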