[net]
# Training
batch=64
subdivisions=16
# Testing
# batch=1
# subdivisions=1
width=384
height=384
channels=3
momentum=0.9
decay=0.0005
angle=0
flip=0
saturation = 1.5
exposure = 1.5
hue=.1

learning_rate=0.001
burn_in=1000
max_batches = 65000
policy=steps
steps=26000,45500
scales=.1,.1

#weights_reject_freq=1001
#ema_alpha=0.9998
#equidistant_point=1000
#num_sigmas_reject_badlabels=3
#badlabels_rejection_percentage=0.2

#cutmix=1
mosaic=1

#:384x384 30:12x12 37:24x24 44:48x48 for 384

# Downsample
[convolutional]
batch_normalize=1
filters=32
size=3
stride=2
pad=1
activation=leaky

# Downsample
[convolutional]
batch_normalize=1
filters=64
size=3
stride=2
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=64
size=3
stride=1
pad=1
activation=leaky

[route]
layers=-1
groups=2
group_id=1

[convolutional]
batch_normalize=1
filters=32
size=3
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=32
size=3
stride=1
pad=1
activation=leaky

[route]
layers = -1,-2

[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=leaky

[route]
layers = -6,-1

[maxpool]
size=2
stride=2

[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=leaky

[route]
layers=-1
groups=2
group_id=1

[convolutional]
batch_normalize=1
filters=64
size=3
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=64
size=3
stride=1
pad=1
activation=leaky

[route]
layers = -1,-2

[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky

[route]
layers = -6,-1

[maxpool]
size=2
stride=2

[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky

[route]
layers=-1
groups=2
group_id=1

[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=leaky

[route]
layers = -1,-2

[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky

[route]
layers = -6,-1

[maxpool]
size=2
stride=2

[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky

[convolutional]
size=1
stride=1
pad=1
filters=18
activation=linear

[yolo]
mask = 6,7,8
anchors = 100, 19, 128, 28, 155, 41, 221, 51, 171, 76, 272, 70, 305, 96, 330,134, 326,228
classes=1
num=9
jitter=.3
scale_x_y = 1.05
cls_normalizer=1.0
iou_normalizer=0.07
iou_loss=ciou
ignore_thresh = .7
truth_thresh = 1
random=1
resize=1.5
nms_kind=greedynms
beta_nms=0.6
#new_coords=1
#scale_x_y = 2.0

[route]
layers = -4

[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky

[upsample]
stride=2

[route]
layers = -1, 23

[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky

[convolutional]
size=1
stride=1
pad=1
filters=18
activation=linear

[yolo]
mask = 3,4,5
anchors = 100, 19, 128, 28, 155, 41, 221, 51, 171, 76, 272, 70, 305, 96, 330,134, 326,228
classes=1
num=9
jitter=.3
scale_x_y = 1.05
cls_normalizer=1.0
iou_normalizer=0.07
iou_loss=ciou
ignore_thresh = .7
truth_thresh = 1
random=1
resize=1.5
nms_kind=greedynms
beta_nms=0.6
#new_coords=1
#scale_x_y = 2.0

[route]
layers = -3

[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=leaky

[upsample]
stride=2

[route]
layers = -1, 15

[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=leaky

[convolutional]
size=1
stride=1
pad=1
filters=18
activation=linear

[yolo]
mask = 0,1,2
anchors = 100, 19, 128, 28, 155, 41, 221, 51, 171, 76, 272, 70, 305, 96, 330,134, 326,228
classes=1
num=9
jitter=.3
scale_x_y = 1.05
cls_normalizer=1.0
iou_normalizer=0.07
iou_loss=ciou
ignore_thresh = .7
truth_thresh = 1
random=1
resize=1.5
nms_kind=greedynms
beta_nms=0.6
#new_coords=1
#scale_x_y = 2.0
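# Notes on this configuration (standard Darknet semantics):
# - batch=64 with subdivisions=16 means each forward/backward pass processes
#   64/16 = 4 images; gradients are accumulated until the full batch of 64
#   is seen, then the weights are updated.
# - policy=steps multiplies learning_rate by the matching entry in scales
#   (here 0.1 each time) at iterations 26000 and 45500, after a burn_in
#   warm-up ramp over the first 1000 iterations.
# - Each [convolutional] layer directly before a [yolo] layer must have
#   filters = (classes + 5) * masks_per_head = (1 + 5) * 3 = 18, which
#   matches classes=1 and the three anchor indices in each mask above.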