
I notice that SegNet contains upsample layers. Its own images are 480 * 360; when I try to use my images (565 * 584), I run into the following error, and I am not sure how to understand SegNet's upsample layer:

I0929 03:58:06.238135 22750 net.cpp:368] upsample4 -> pool4_D 
I0929 03:58:06.238142 22750 net.cpp:120] Setting up upsample4 
F0929 03:58:06.238164 22750 upsample_layer.cpp:63] Check failed: bottom[0]->height() == bottom[1]->height() (38 vs. 37) 

Here is the layer definition:

layer { 
    name: "upsample4" 
    type: "Upsample" 
    bottom: "conv5_1_D" 
    top: "pool4_D" 
    bottom: "pool4_mask" 
    upsample_param { 
    scale: 2 
    upsample_w: 60 
    upsample_h: 45 
    } 
} 

I think I should change upsample_w and upsample_h, but I don't know the exact values. Can anybody tell me the relationship between scale, upsample_w, upsample_h and the image size, or how to calculate it?

The whole definition of the net (segnet_train.prototxt):

name: "VGG_ILSVRC_16_layer" 
layer { 
    name: "data" 
    type: "DenseImageData" 
    top: "data" 
    top: "label" 
    dense_image_data_param { 
    source: "/home/zhaimo/SegNet/CamVid/mytrain.txt" # Change this to the absolute path to your data file 
    batch_size: 4    # Change this number to a batch size that will fit on your GPU 
    shuffle: true 
    } 
} 
layer { 
    bottom: "data" 
    top: "conv1_1" 
    name: "conv1_1" 
    type: "Convolution" 
    param { 
    lr_mult: 1 
    decay_mult: 1 
    } 
    param { 
    lr_mult: 2 
    decay_mult: 0 
    } 
    convolution_param { 
    weight_filler { 
     type: "msra" 
    } 
    bias_filler { 
     type: "constant" 
    } 
    num_output: 64 
    pad: 1 
    kernel_size: 3 
    } 
} 
layer { 
    bottom: "conv1_1" 
    top: "conv1_1" 
    name: "conv1_1_bn" 
    type: "BN" 
    param { 
    lr_mult: 1 
    decay_mult: 1 
    } 
    param { 
    lr_mult: 1 
    decay_mult: 0 
    } 
    bn_param { 
    scale_filler { 
     type: "constant" 
     value: 1 
    } 
    shift_filler { 
     type: "constant" 
     value: 0.001 
    } 
} 
} 
layer { 
    bottom: "conv1_1" 
    top: "conv1_1" 
    name: "relu1_1" 
    type: "ReLU" 
} 
layer { 
    bottom: "conv1_1" 
    top: "conv1_2" 
    name: "conv1_2" 
    type: "Convolution" 
    param { 
    lr_mult: 1 
    decay_mult: 1 
    } 
    param { 
    lr_mult: 2 
    decay_mult: 0 
    } 
    convolution_param { 
    weight_filler { 
     type: "msra" 
    } 
    bias_filler { 
     type: "constant" 
    } 
    num_output: 64 
    pad: 1 
    kernel_size: 3 
    } 
} 
layer { 
    bottom: "conv1_2" 
    top: "conv1_2" 
    name: "conv1_2_bn" 
    type: "BN" 
    param { 
    lr_mult: 1 
    decay_mult: 1 
    } 
    param { 
    lr_mult: 1 
    decay_mult: 0 
    } 
    bn_param { 
    scale_filler { 
     type: "constant" 
     value: 1 
    } 
    shift_filler { 
     type: "constant" 
     value: 0.001 
    } 
} 
} 
layer { 
    bottom: "conv1_2" 
    top: "conv1_2" 
    name: "relu1_2" 
    type: "ReLU" 
} 
layer { 
    bottom: "conv1_2" 
    top: "pool1" 
    top: "pool1_mask" 
    name: "pool1" 
    type: "Pooling" 
    pooling_param { 
    pool: MAX 
    kernel_size: 2 
    stride: 2 
    } 
} 
layer { 
    bottom: "pool1" 
    top: "conv2_1" 
    name: "conv2_1" 
    type: "Convolution" 
    param { 
    lr_mult: 1 
    decay_mult: 1 
    } 
    param { 
    lr_mult: 2 
    decay_mult: 0 
    } 
    convolution_param { 
    weight_filler { 
     type: "msra" 
    } 
    bias_filler { 
     type: "constant" 
    } 
    num_output: 128 
    pad: 1 
    kernel_size: 3 
    } 
} 
layer { 
    bottom: "conv2_1" 
    top: "conv2_1" 
    name: "conv2_1_bn" 
    type: "BN" 
    param { 
    lr_mult: 1 
    decay_mult: 1 
    } 
    param { 
    lr_mult: 1 
    decay_mult: 0 
    } 
    bn_param { 
    scale_filler { 
     type: "constant" 
     value: 1 
    } 
    shift_filler { 
     type: "constant" 
     value: 0.001 
    } 
} 
} 
layer { 
    bottom: "conv2_1" 
    top: "conv2_1" 
    name: "relu2_1" 
    type: "ReLU" 
} 
layer { 
    bottom: "conv2_1" 
    top: "conv2_2" 
    name: "conv2_2" 
    type: "Convolution" 
    param { 
    lr_mult: 1 
    decay_mult: 1 
    } 
    param { 
    lr_mult: 2 
    decay_mult: 0 
    } 
    convolution_param { 
    weight_filler { 
     type: "msra" 
    } 
    bias_filler { 
     type: "constant" 
    } 
    num_output: 128 
    pad: 1 
    kernel_size: 3 
    } 
} 
layer { 
    bottom: "conv2_2" 
    top: "conv2_2" 
    name: "conv2_2_bn" 
    type: "BN" 
    param { 
    lr_mult: 1 
    decay_mult: 1 
    } 
    param { 
    lr_mult: 1 
    decay_mult: 0 
    } 
    bn_param { 
    scale_filler { 
     type: "constant" 
     value: 1 
    } 
    shift_filler { 
     type: "constant" 
     value: 0.001 
    } 
} 
} 
layer { 
    bottom: "conv2_2" 
    top: "conv2_2" 
    name: "relu2_2" 
    type: "ReLU" 
} 
layer { 
    bottom: "conv2_2" 
    top: "pool2" 
    top: "pool2_mask" 
    name: "pool2" 
    type: "Pooling" 
    pooling_param { 
    pool: MAX 
    kernel_size: 2 
    stride: 2 
    } 
} 
layer { 
    bottom: "pool2" 
    top: "conv3_1" 
    name: "conv3_1" 
    type: "Convolution" 
    param { 
    lr_mult: 1 
    decay_mult: 1 
    } 
    param { 
    lr_mult: 2 
    decay_mult: 0 
    } 
    convolution_param { 
    weight_filler { 
     type: "msra" 
    } 
    bias_filler { 
     type: "constant" 
    } 
    num_output: 256 
    pad: 1 
    kernel_size: 3 
    } 
} 
layer { 
    bottom: "conv3_1" 
    top: "conv3_1" 
    name: "conv3_1_bn" 
    type: "BN" 
    param { 
    lr_mult: 1 
    decay_mult: 1 
    } 
    param { 
    lr_mult: 1 
    decay_mult: 0 
    } 
    bn_param { 
    scale_filler { 
     type: "constant" 
     value: 1 
    } 
    shift_filler { 
     type: "constant" 
     value: 0.001 
    } 
} 
} 
layer { 
    bottom: "conv3_1" 
    top: "conv3_1" 
    name: "relu3_1" 
    type: "ReLU" 
} 
layer { 
    bottom: "conv3_1" 
    top: "conv3_2" 
    name: "conv3_2" 
    type: "Convolution" 
    param { 
    lr_mult: 1 
    decay_mult: 1 
    } 
    param { 
    lr_mult: 2 
    decay_mult: 0 
    } 
    convolution_param { 
    weight_filler { 
     type: "msra" 
    } 
    bias_filler { 
     type: "constant" 
    } 
    num_output: 256 
    pad: 1 
    kernel_size: 3 
    } 
} 
layer { 
    bottom: "conv3_2" 
    top: "conv3_2" 
    name: "conv3_2_bn" 
    type: "BN" 
    param { 
    lr_mult: 1 
    decay_mult: 1 
    } 
    param { 
    lr_mult: 1 
    decay_mult: 0 
    } 
    bn_param { 
    scale_filler { 
     type: "constant" 
     value: 1 
    } 
    shift_filler { 
     type: "constant" 
     value: 0.001 
    } 
} 
} 
layer { 
    bottom: "conv3_2" 
    top: "conv3_2" 
    name: "relu3_2" 
    type: "ReLU" 
} 
layer { 
    bottom: "conv3_2" 
    top: "conv3_3" 
    name: "conv3_3" 
    type: "Convolution" 
    param { 
    lr_mult: 1 
    decay_mult: 1 
    } 
    param { 
    lr_mult: 2 
    decay_mult: 0 
    } 
    convolution_param { 
    weight_filler { 
     type: "msra" 
    } 
    bias_filler { 
     type: "constant" 
    } 
    num_output: 256 
    pad: 1 
    kernel_size: 3 
    } 
} 
layer { 
    bottom: "conv3_3" 
    top: "conv3_3" 
    name: "conv3_3_bn" 
    type: "BN" 
    param { 
    lr_mult: 1 
    decay_mult: 1 
    } 
    param { 
    lr_mult: 1 
    decay_mult: 0 
    } 
    bn_param { 
    scale_filler { 
     type: "constant" 
     value: 1 
    } 
    shift_filler { 
     type: "constant" 
     value: 0.001 
    } 
} 
} 
layer { 
    bottom: "conv3_3" 
    top: "conv3_3" 
    name: "relu3_3" 
    type: "ReLU" 
} 
layer { 
    bottom: "conv3_3" 
    top: "pool3" 
    top: "pool3_mask" 
    name: "pool3" 
    type: "Pooling" 
    pooling_param { 
    pool: MAX 
    kernel_size: 2 
    stride: 2 
    } 
} 
layer { 
    bottom: "pool3" 
    top: "conv4_1" 
    name: "conv4_1" 
    type: "Convolution" 
    param { 
    lr_mult: 1 
    decay_mult: 1 
    } 
    param { 
    lr_mult: 2 
    decay_mult: 0 
    } 
    convolution_param { 
    weight_filler { 
     type: "msra" 
    } 
    bias_filler { 
     type: "constant" 
    } 
    num_output: 512 
    pad: 1 
    kernel_size: 3 
    } 
} 
layer { 
    bottom: "conv4_1" 
    top: "conv4_1" 
    name: "conv4_1_bn" 
    type: "BN" 
    param { 
    lr_mult: 1 
    decay_mult: 1 
    } 
    param { 
    lr_mult: 1 
    decay_mult: 0 
    } 
    bn_param { 
    scale_filler { 
     type: "constant" 
     value: 1 
    } 
    shift_filler { 
     type: "constant" 
     value: 0.001 
    } 
} 
} 
layer { 
    bottom: "conv4_1" 
    top: "conv4_1" 
    name: "relu4_1" 
    type: "ReLU" 
} 
layer { 
    bottom: "conv4_1" 
    top: "conv4_2" 
    name: "conv4_2" 
    type: "Convolution" 
    param { 
    lr_mult: 1 
    decay_mult: 1 
    } 
    param { 
    lr_mult: 2 
    decay_mult: 0 
    } 
    convolution_param { 
    weight_filler { 
     type: "msra" 
    } 
    bias_filler { 
     type: "constant" 
    } 
    num_output: 512 
    pad: 1 
    kernel_size: 3 
    } 
} 
layer { 
    bottom: "conv4_2" 
    top: "conv4_2" 
    name: "conv4_2_bn" 
    type: "BN" 
    param { 
    lr_mult: 1 
    decay_mult: 1 
    } 
    param { 
    lr_mult: 1 
    decay_mult: 0 
    } 
    bn_param { 
    scale_filler { 
     type: "constant" 
     value: 1 
    } 
    shift_filler { 
     type: "constant" 
     value: 0.001 
    } 
} 
} 
layer { 
    bottom: "conv4_2" 
    top: "conv4_2" 
    name: "relu4_2" 
    type: "ReLU" 
} 
layer { 
    bottom: "conv4_2" 
    top: "conv4_3" 
    name: "conv4_3" 
    type: "Convolution" 
    param { 
    lr_mult: 1 
    decay_mult: 1 
    } 
    param { 
    lr_mult: 2 
    decay_mult: 0 
    } 
    convolution_param { 
    weight_filler { 
     type: "msra" 
    } 
    bias_filler { 
     type: "constant" 
    } 
    num_output: 512 
    pad: 1 
    kernel_size: 3 
    } 
} 
layer { 
    bottom: "conv4_3" 
    top: "conv4_3" 
    name: "conv4_3_bn" 
    type: "BN" 
    param { 
    lr_mult: 1 
    decay_mult: 1 
    } 
    param { 
    lr_mult: 1 
    decay_mult: 0 
    } 
    bn_param { 
    scale_filler { 
     type: "constant" 
     value: 1 
    } 
    shift_filler { 
     type: "constant" 
     value: 0.001 
    } 
} 
} 
layer { 
    bottom: "conv4_3" 
    top: "conv4_3" 
    name: "relu4_3" 
    type: "ReLU" 
} 
layer { 
    bottom: "conv4_3" 
    top: "pool4" 
    top: "pool4_mask" 
    name: "pool4" 
    type: "Pooling" 
    pooling_param { 
    pool: MAX 
    kernel_size: 2 
    stride: 2 
    } 
} 
layer { 
    bottom: "pool4" 
    top: "conv5_1" 
    name: "conv5_1" 
    type: "Convolution" 
    param { 
    lr_mult: 1 
    decay_mult: 1 
    } 
    param { 
    lr_mult: 2 
    decay_mult: 0 
    } 
    convolution_param { 
    weight_filler { 
     type: "msra" 
    } 
    bias_filler { 
     type: "constant" 
    } 
    num_output: 512 
    pad: 1 
    kernel_size: 3 
    } 
} 
layer { 
    bottom: "conv5_1" 
    top: "conv5_1" 
    name: "conv5_1_bn" 
    type: "BN" 
    param { 
    lr_mult: 1 
    decay_mult: 1 
    } 
    param { 
    lr_mult: 1 
    decay_mult: 0 
    } 
    bn_param { 
    scale_filler { 
     type: "constant" 
     value: 1 
    } 
    shift_filler { 
     type: "constant" 
     value: 0.001 
    } 
} 
} 
layer { 
    bottom: "conv5_1" 
    top: "conv5_1" 
    name: "relu5_1" 
    type: "ReLU" 
} 
layer { 
    bottom: "conv5_1" 
    top: "conv5_2" 
    name: "conv5_2" 
    type: "Convolution" 
    param { 
    lr_mult: 1 
    decay_mult: 1 
    } 
    param { 
    lr_mult: 2 
    decay_mult: 0 
    } 
    convolution_param { 
    weight_filler { 
     type: "msra" 
    } 
    bias_filler { 
     type: "constant" 
    } 
    num_output: 512 
    pad: 1 
    kernel_size: 3 
    } 
} 
layer { 
    bottom: "conv5_2" 
    top: "conv5_2" 
    name: "conv5_2_bn" 
    type: "BN" 
    param { 
    lr_mult: 1 
    decay_mult: 1 
    } 
    param { 
    lr_mult: 1 
    decay_mult: 0 
    } 
    bn_param { 
    scale_filler { 
     type: "constant" 
     value: 1 
    } 
    shift_filler { 
     type: "constant" 
     value: 0.001 
    } 
} 
} 
layer { 
    bottom: "conv5_2" 
    top: "conv5_2" 
    name: "relu5_2" 
    type: "ReLU" 
} 
layer { 
    bottom: "conv5_2" 
    top: "conv5_3" 
    name: "conv5_3" 
    type: "Convolution" 
    param { 
    lr_mult: 1 
    decay_mult: 1 
    } 
    param { 
    lr_mult: 2 
    decay_mult: 0 
    } 
    convolution_param { 
    weight_filler { 
     type: "msra" 
    } 
    bias_filler { 
     type: "constant" 
    } 
    num_output: 512 
    pad: 1 
    kernel_size: 3 
    } 
} 
layer { 
    bottom: "conv5_3" 
    top: "conv5_3" 
    name: "conv5_3_bn" 
    type: "BN" 
    param { 
    lr_mult: 1 
    decay_mult: 1 
    } 
    param { 
    lr_mult: 1 
    decay_mult: 0 
    } 
    bn_param { 
    scale_filler { 
     type: "constant" 
     value: 1 
    } 
    shift_filler { 
     type: "constant" 
     value: 0.001 
    } 
} 
} 
layer { 
    bottom: "conv5_3" 
    top: "conv5_3" 
    name: "relu5_3" 
    type: "ReLU" 
} 
layer { 
    bottom: "conv5_3" 
    top: "pool5" 
    top: "pool5_mask" 
    name: "pool5" 
    type: "Pooling" 
    pooling_param { 
    pool: MAX 
    kernel_size: 2 
    stride: 2 
    } 
} 
layer { 
    name: "upsample5" 
    type: "Upsample" 
    bottom: "pool5" 
    top: "pool5_D" 
    bottom: "pool5_mask" 
    upsample_param { 
    scale: 2 
    upsample_w: 30 
    upsample_h: 23 
    } 
} 
....(The rest is omitted) 

The error you get is because the shape of 'conv5_1_D' is different from the shape of 'pool4_mask': they have different heights. – Shai

Answer


You should change upsample_w and upsample_h. Every pooling layer shrinks your image by a factor of 2, so you should count how many pooling layers you have and then calculate the upsample sizes from the dimensions of your own images.
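For reference, here is a minimal sketch of that calculation, assuming your images are 565 wide and 584 high and that Caffe's usual MAX pooling output formula ceil((in - kernel) / stride) + 1 applies to these 2x2, stride-2 pooling layers. It walks the five encoder pooling stages and prints the upsample_w / upsample_h each decoder upsample layer would need so that its output matches the corresponding pooling mask:

import math

def pool_out(size, kernel=2, stride=2):
    # Caffe MAX pooling output size: ceil((in - kernel) / stride) + 1
    return int(math.ceil((size - kernel) / float(stride))) + 1

# Assumed input size: 565 wide x 584 high; adjust if your images differ.
w, h = 565, 584
encoder = [(w, h)]                      # sizes going into pool1 .. pool5
for i in range(5):
    w, h = pool_out(w), pool_out(h)
    encoder.append((w, h))
    print("pool%d output: %d x %d (w x h)" % (i + 1, w, h))

# upsampleN must restore the size of the feature map that fed poolN,
# i.e. the size recorded just before that pooling step.
for n in range(5, 0, -1):
    uw, uh = encoder[n - 1]
    print("upsample%d: upsample_w: %d  upsample_h: %d" % (n, uw, uh))

Under those assumptions this would give, for example, upsample_w: 36 and upsample_h: 37 for upsample5 (and 71 x 73 for upsample4), which lines up with the pool4_mask height of 37 reported in your error message.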
