
I am running the deep learning project at https://github.com/hyangwinter/flownet3d_pytorch. I ran into a problem while running it and don't know how to solve it.

The error is as follows:

Namespace(batch_size=64, cycle=False, dataset='SceneflowDataset', dataset_path='data/data_processed_maxcut_35_20k_2k_8192/', dropout=0.5, emb_dims=512, epochs=250, eval=False, exp_name='flownet3d', gaussian_noise=False, lr=0.001, model='flownet', model_path='', momentum=0.9, no_cuda=False, num_points=2048, seed=1234, test_batch_size=32, unseen=False, use_sgd=False)
    train :  20006
    test :  2007
    Traceback (most recent call last):
    File "main.py", line 282, in <module>
        main()
    File "main.py", line 254, in main
        net = FlowNet3D(args).cuda()
    File "/home/ubuntu/project/flownet3d_pytorch/model.py", line 13, in __init__
        self.sa1 = PointNetSetAbstraction(npoint=1024, radius=0.5, nsample=16, in_channel=3, mlp=[32,32,64], group_all=False)
    File "/home/ubuntu/project/flownet3d_pytorch/util.py", line 225, in __init__
        for out_channel in mlp2:
    TypeError: 'NoneType' object is not iterable

My code:

class FlowNet3D(nn.Module):

    def __init__(self,args):
        super(FlowNet3D,self).__init__()

        self.sa1 = PointNetSetAbstraction(npoint=1024, radius=0.5, nsample=16, in_channel=3, mlp=[32,32,64], group_all=False)
        self.sa2 = PointNetSetAbstraction(npoint=256, radius=1.0, nsample=16, in_channel=64, mlp=[64, 64, 128], group_all=False)
        self.sa3 = PointNetSetAbstraction(npoint=64, radius=2.0, nsample=8, in_channel=128, mlp=[128, 128, 256], group_all=False)
        self.sa4 = PointNetSetAbstraction(npoint=16, radius=4.0, nsample=8, in_channel=256, mlp=[256,256,512], group_all=False)
    
        self.fe_layer = FlowEmbedding(radius=10.0, nsample=64, in_channel = 128, mlp=[128, 128, 128], pooling='max', corr_func='concat')
    
        self.su1 = PointNetSetUpConv(nsample=8, radius=2.4, f1_channel = 256, f2_channel = 512, mlp=[], mlp2=[256, 256])
        self.su2 = PointNetSetUpConv(nsample=8, radius=1.2, f1_channel = 128+128, f2_channel = 256, mlp=[128, 128, 256], mlp2=[256])
        self.su3 = PointNetSetUpConv(nsample=8, radius=0.6, f1_channel = 64, f2_channel = 256, mlp=[128, 128, 256], mlp2=[256])
        self.fp = PointNetFeaturePropogation(in_channel = 256+3, mlp = [256, 256])
    
        self.conv1 = nn.Conv1d(256, 128, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm1d(128)
        self.conv2=nn.Conv1d(128, 3, kernel_size=1, bias=True)
    
    def forward(self, pc1, pc2, feature1, feature2):
        l1_pc1, l1_feature1 = self.sa1(pc1, feature1)
        l2_pc1, l2_feature1 = self.sa2(l1_pc1, l1_feature1)
    
        l1_pc2, l1_feature2 = self.sa1(pc2, feature2)
        l2_pc2, l2_feature2 = self.sa2(l1_pc2, l1_feature2)
    
        _, l2_feature1_new = self.fe_layer(l2_pc1, l2_pc2, l2_feature1, l2_feature2)

        l3_pc1, l3_feature1 = self.sa3(l2_pc1, l2_feature1_new)
        l4_pc1, l4_feature1 = self.sa4(l3_pc1, l3_feature1)
    
        l3_fnew1 = self.su1(l3_pc1, l4_pc1, l3_feature1, l4_feature1)
        l2_fnew1 = self.su2(l2_pc1, l3_pc1, torch.cat([l2_feature1, l2_feature1_new], dim=1), l3_fnew1)
        l1_fnew1 = self.su3(l1_pc1, l2_pc1, l1_feature1, l2_fnew1)
        l0_fnew1 = self.fp(pc1, l1_pc1, feature1, l1_fnew1)
    
        x = F.relu(self.bn1(self.conv1(l0_fnew1)))
        sf = self.conv2(x)
        return sf

class PointNetSetAbstraction(nn.Module):
    def __init__(self, npoint, radius, nsample, in_channel, mlp, mlp2 = None, group_all = False):
        super(PointNetSetAbstraction, self).__init__()
        self.npoint = npoint
        self.radius = radius
        self.nsample = nsample
        self.group_all = group_all
        self.mlp_convs = nn.ModuleList()
        self.mlp_bns = nn.ModuleList()
        self.mlp2_convs = nn.ModuleList()
        last_channel = in_channel+3
        for out_channel in mlp:
            self.mlp_convs.append(nn.Conv2d(last_channel, out_channel, 1, bias = False))
            self.mlp_bns.append(nn.BatchNorm2d(out_channel))
            last_channel = out_channel
        for out_channel in mlp2:
            self.mlp2_convs.append(nn.Sequential(nn.Conv1d(last_channel, out_channel, 1, bias=False),
                                                nn.BatchNorm1d(out_channel)))
            last_channel = out_channel
        if group_all:
            self.queryandgroup = pointutils.GroupAll()
        else:
            self.queryandgroup = pointutils.QueryAndGroup(radius, nsample)
    def forward(self, xyz, points):
        """
        Input:
            xyz: input points position data, [B, C, N]
            points: input points data, [B, D, N]
        Return:
            new_xyz: sampled points position data, [B, S, C]
            new_points_concat: sample points feature data, [B, S, D']
        """
        device = xyz.device
        B, C, N = xyz.shape
        xyz_t = xyz.permute(0, 2, 1).contiguous()
        # if points is not None:
        #     points = points.permute(0, 2, 1).contiguous()

        if self.group_all == False:
            fps_idx = pointutils.furthest_point_sample(xyz_t, self.npoint)  # [B, N]
            new_xyz = pointutils.gather_operation(xyz, fps_idx)  # [B, C, N]
        else:
            new_xyz = xyz
        new_points = self.queryandgroup(xyz_t, new_xyz.transpose(2, 1).contiguous(), points) # [B, 3+C, N, S]
    
        # new_xyz: sampled points position data, [B, C, npoint]
        # new_points: sampled points data, [B, C+D, npoint, nsample]
        for i, conv in enumerate(self.mlp_convs):
            bn = self.mlp_bns[i]
            new_points =  F.relu(bn(conv(new_points)))

        new_points = torch.max(new_points, -1)[0]

        for i, conv in enumerate(self.mlp2_convs):
            new_points = F.relu(conv(new_points))
        return new_xyz, new_points

I noticed that the mlp2 parameter is set to None, but I don't know how to solve this. In the original source code implementation, this parameter is also None.

2 Answers


  1. Since your self.sa1 call does not pass a mlp2 argument, it uses the default value, None, as you defined in

    class PointNetSetAbstraction(nn.Module): 
        def __init__(self, npoint, radius, nsample, in_channel, mlp, mlp2=None, group_all=False):
    

    And of course None is not iterable. To solve this, you can add an if-condition before the for-loop. Alternatively, you can simply pass mlp2=[] as you already did for self.su1.
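
    For example, a minimal sketch of the guard (only the mlp2 loop from __init__ is shown; the rest of your PointNetSetAbstraction stays unchanged):

        # Treat a missing mlp2 as "no second MLP": an empty list makes the loop a no-op.
        if mlp2 is None:
            mlp2 = []
        for out_channel in mlp2:
            self.mlp2_convs.append(nn.Sequential(nn.Conv1d(last_channel, out_channel, 1, bias=False),
                                                 nn.BatchNorm1d(out_channel)))
            last_channel = out_channel

    With this guard in place, constructing PointNetSetAbstraction without an mlp2 argument simply skips the second MLP stage instead of raising the TypeError.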

  2. As you pointed out in your question, mlp2 is set to None in the function call. When trying to iterate over mlp2 with

    for out_channel in mlp2:
    

    the code correctly throws a TypeError.

    It is hard to tell what you are trying to do, especially because mlp2 is not a very descriptive variable name. From the context of your code I would assume that mlp2 is a list that holds the sizes of your layers. You might try something like:

    mlp2 = [16,16,16,16]
    

    This will initialize the model with four layers of size 16 (if the rest of the code is working).
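
    For example, you could pass it explicitly in the constructor call (the layer sizes here are purely illustrative, not values taken from the FlowNet3D reference code):

        # Hypothetical call: adds a second MLP of four 16-channel Conv1d layers after the max-pooling step.
        self.sa1 = PointNetSetAbstraction(npoint=1024, radius=0.5, nsample=16, in_channel=3,
                                          mlp=[32, 32, 64], mlp2=[16, 16, 16, 16], group_all=False)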
