on
on

2021/10/25

Author:

LeetCode题两道

class Solution {
public:
    // Divide-and-conquer (segment-tree style) solution to Maximum Subarray.
    // For each interval we keep four aggregates:
    //   lSum: best sum of a prefix of the interval
    //   rSum: best sum of a suffix of the interval
    //   mSum: best sum of any non-empty sub-array inside the interval
    //   iSum: total sum of the interval
    struct Status {
        int lSum, rSum, mSum, iSum;
    };

    // Merge the aggregates of adjacent left and right child intervals.
    static Status pushUp(const Status& l, const Status& r) {
        Status res;
        res.iSum = l.iSum + r.iSum;
        // Best prefix: left's prefix, or all of left plus right's prefix.
        res.lSum = max(l.lSum, l.iSum + r.lSum);
        // Best suffix: symmetric to the prefix case.
        res.rSum = max(r.rSum, r.iSum + l.rSum);
        // Best sub-array: fully left, fully right, or spanning the boundary.
        res.mSum = max(max(l.mSum, r.mSum), l.rSum + r.lSum);
        return res;  // standard aggregate return (was a C99 compound literal)
    }

    // Aggregates for a[l..r] (inclusive), computed recursively.
    Status get(vector<int>& a, int l, int r) {
        if (l == r) {
            // Single element: every aggregate is that element.
            return {a[l], a[l], a[l], a[l]};
        }
        int m = l + (r - l) / 2;  // overflow-safe midpoint (was (l + r) >> 1)
        Status lSub = get(a, l, m);
        Status rSub = get(a, m + 1, r);
        return pushUp(lSub, rSub);
    }

    // Returns the maximum sum over all non-empty contiguous sub-arrays.
    // Defensively returns 0 for an empty input — the original computed
    // nums.size() - 1 on an unsigned value, underflowing to SIZE_MAX and
    // recursing out of bounds.
    int maxSubArray(vector<int>& nums) {
        if (nums.empty()) return 0;
        return get(nums, 0, static_cast<int>(nums.size()) - 1).mSum;
    }
};

class Solution {
public:
    // LCP 08: for every requirement [c, r, h], find the first day on which all
    // three accumulated attributes reach the thresholds.
    //
    // increase[i] is the attribute gain applied on day i+1 (day 0 is the
    // all-zero starting state); requirements[j] = {c, r, h}. Returns, per
    // requirement, the earliest day it is fully met, or -1 if never.
    //
    // Strategy: sort each attribute's (threshold, id) pairs; sweep the days
    // once, and per attribute advance a pointer over the thresholds that the
    // running total already satisfies. A requirement triggers on the day its
    // third attribute becomes satisfied. O((n + m) log m) overall.
    //
    // Fixes vs. the original:
    //  - empty `requirements` indexed c_req[size-1] with size == 0 (UB);
    //    the bounds check now comes before the element access.
    //  - the caller's `increase` vector is no longer mutated (the original
    //    inserted a {0,0,0} row at the front).
    //  - the three copy-pasted sweep loops are collapsed into one.
    vector<int> getTriggerTime(vector<vector<int>>& increase, vector<vector<int>>& requirements) {
        const int m = static_cast<int>(requirements.size());
        vector<int> results(m, -1);
        if (m == 0) return results;  // guard: original read req[-1] here

        // Per attribute (0=C, 1=R, 2=H): (threshold, requirement id) sorted
        // ascending by threshold.
        vector<pair<int, int>> req[3];
        for (int a = 0; a < 3; ++a) {
            req[a].reserve(m);
            for (int i = 0; i < m; ++i) {
                req[a].push_back({requirements[i][a], i});
            }
            sort(req[a].begin(), req[a].end());
        }

        vector<int> satisfied(m, 0);   // how many of the 3 attributes are met
        int ind[3] = {0, 0, 0};        // next unsatisfied threshold, per attribute
        long long now[3] = {0, 0, 0};  // running attribute totals
        const int days = static_cast<int>(increase.size());

        // Day 0 is the zero state; day d (d >= 1) applies increase[d-1].
        for (int day = 0; day <= days; ++day) {
            if (day > 0) {
                for (int a = 0; a < 3; ++a) now[a] += increase[day - 1][a];
            }
            for (int a = 0; a < 3; ++a) {
                // Consume every threshold the current total already meets.
                while (ind[a] < m && now[a] >= req[a][ind[a]].first) {
                    const int id = req[a][ind[a]].second;
                    // The day the last of the three thresholds is met is the
                    // trigger day for that requirement.
                    if (++satisfied[id] == 3) results[id] = day;
                    ++ind[a];
                }
            }
        }
        return results;
    }
};


Det3D框架解析

det3d/datasets/utils/create_gt_database.py 用于数据增强,生成对应帧中部分物体的点云结果

DataLoader

数据处理流程

graph LR
A[LoadPointCloudFromFile] --> B[LoadPointCloudAnnotations]
B --> C[Preprocess]
C --> D[Voxelization]
D --> E[AssignLabel]
E --> F[Reformat]

数据集总读取文件 det3d/datasets/nuscenes: class NuScenesDataset(PointCloudDataset)

Preprocess

文件目录:det3d/datasets/pipeline/preprocess.py

config中,train_preprocessor给出了preprocess的参数。

参数解析:

shuffle_points: 重新打乱点云的顺序

random_flip_both: 无参数操作,随机对x,y轴进行翻转,概率0.5

global_rotation: 以均匀分布采样的形式,对给出的旋转区间内的点进行一个随机的旋转

global_scaling_v2: 以均匀分布采样的形式,对坐标,长宽高等进行线性放缩

global_translate: 以正态分布的形式进行采样,对translation进行一个位移的添加

Voxelization

配置文件对应config中的voxel_generator

voxelnet中:

voxel_generator = dict(
range=[-54, -54, -5.0, 54, 54, 3.0],
voxel_size=[0.075, 0.075, 0.2],
max_points_in_voxel=10,
max_voxel_num=[120000, 160000],
)

返回类型:

res["lidar"]["voxels"] = dict(
voxels=voxels,
coordinates=coordinates,
num_points=num_points,
num_voxels=num_voxels,
shape=grid_size,
range=pc_range,
size=voxel_size
)

Voxelization的过程遵循先到先得,因此在preprocess中需要进行shuffle

AssignLabel

该函数用于返回CenterNet训练所需要的labels,例如heatmap,height和offset

比较重要,可以用来移植?

assigner = dict(
target_assigner=target_assigner,
out_size_factor=get_downsample_factor(model),
dense_reg=1,
gaussian_overlap=0.1,
max_objs=500,
min_radius=2,
)

target_assigner = dict(
tasks=tasks,
)

tasks = [
dict(num_class=1, class_names=["car"]),
dict(num_class=2, class_names=["truck", "construction_vehicle"]),
dict(num_class=2, class_names=["bus", "trailer"]),
dict(num_class=1, class_names=["barrier"]),
dict(num_class=2, class_names=["motorcycle", "bicycle"]),
dict(num_class=2, class_names=["pedestrian", "traffic_cone"]),
]

  • out_size_factor: 决定了生成的图的大小
  • tasks: 多个任务的类别标签
  • gaussian_overlap: hm的输出的覆盖半径?需要后面细看
  • max_objs: 最大场景下的对象个数?为啥要设这玩意
  • min_radius: 最小半径?是说lidar的盲区半径的意思?

draw_umich_gaussian函数很重要,可以拿来抄

gaussian

生成的label包括:hm;anno_boxs;inds;masks;cats;

det3d/core/utils/center_utils.py: gaussian_radius 没看懂???why???

是从CornerNet里copy的代码???我感觉这个半径有点奇怪。

暂时忽略,等看完loss和那啥再做决定

Reformat

写个字典把需要的数据打个包而已

Dataloader

det3d/torchie/parallel/collate.py 的collate_kitti完成了对字典数据的collect,具体没看,之后再说