Skip to content

Commit cde1a2a

Browse files
committed
Initial Commit
1 parent 4e2e06f commit cde1a2a

27 files changed

Lines changed: 1022 additions & 0 deletions

‎Data/.gitkeep‎

Whitespace-only changes.

‎FlashLidarGIF.gif‎

15.7 MB
Loading

‎PreProcessingScripts/.gitkeep‎

Whitespace-only changes.
Lines changed: 15 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,15 @@
1+
function [XYZ] = calculateXYZ(Range, fov)
% calculateXYZ Convert a range image into Cartesian (x,y,z) coordinates.
%
% Inputs:
%   Range - range image in sensor units; the grid size is taken from
%           size(Range) (originally a fixed 128x128 sensor).
%   fov   - optional [az el] field of view in degrees. Defaults to
%           3*[1 1] to match the original sensor FOV.
%
% Output:
%   XYZ   - size(Range)-by-3 array; channels 1:3 are x, y, z.

% Default FOV preserves the original hard-coded behavior.
if nargin < 2
    fov = 3*[1 1]; % [az el] adjust this to match sensor FOV
end

% Derive the angular grid from the range image itself instead of a
% hard-coded 128, so non-square / differently sized sensors also work.
[numRows, numCols] = size(Range);

% Azimuth spans columns, elevation spans rows.
az = linspace(-fov(1)/2, fov(1)/2, numCols);
el = linspace(-fov(2)/2, fov(2)/2, numRows);

% Elevation is flipped so the image orientation matches the raw images.
% NOTE(review): the original comment said to "flip az as well", but az was
% never flipped in the original code; behavior is kept as-is — confirm
% intended orientation against the raw images.
az_matrix = repmat(az, [numel(el) 1]);
el_matrix = repmat(flip(el)', [1 numel(az)]);

% Calculate cartesian coordinates from azimuth and elevation (degrees
% converted to radians for sph2cart).
[x_matrix, y_matrix, z_matrix] = sph2cart(az_matrix*pi/180, el_matrix*pi/180, Range);
XYZ = cat(3, x_matrix, y_matrix, z_matrix);
end
Lines changed: 33 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,33 @@
1+
function [mask] = convertROItoMask(Image, ROI, RangeBuffer)
% convertROItoMask Convert a 2D bounding box into a segmentation mask.
%
% Inputs:
%   Image       - range image (same size as the desired mask).
%   ROI         - [x y w h] bounding box; may be empty (returns all-false).
%   RangeBuffer - half-width of the accepted range band around the
%                 estimated target distance.
%
% Output:
%   mask        - logical mask of the detected vehicle.

% Initialize mask
mask = false(size(Image));

if ~isempty(ROI)
    % Create mask for all points inside the bounding box, clamped to the
    % actual image bounds (the original hard-coded 128 and did not clamp
    % the lower edge, so a rounded origin of 0 indexed out of bounds).
    [nRows, nCols] = size(Image);
    ROI = round(ROI);
    inROI = false(size(Image));
    r1 = max(1, ROI(2));
    r2 = min(nRows, ROI(2) + ROI(4));
    c1 = max(1, ROI(1));
    c2 = min(nCols, ROI(1) + ROI(3));
    inROI(r1:r2, c1:c2) = true;

    % Calculate the "mode" of the range of the points within the bounding
    % box (quantized to 1000-unit bins), which is an estimate of the
    % distance of the detected vehicle, and eliminate all points that are
    % too far or too close compared to the estimated distance
    % (Range +/- RangeBuffer).
    rangeWindow = fix(Image(inROI & Image ~= 0)/1000)*1000;
    midRange = mode(rangeWindow, "all");
    inRange = (Image > midRange - RangeBuffer) & (Image < midRange + RangeBuffer);

    % Combine logical masks
    mask = (inRange & inROI);

    % Morphological operations to improve estimated masks:
    % fill holes, then close with a disk structuring element.
    mask = imfill(mask, 'holes');
    radius = 9;
    decomposition = 0;
    se = strel('disk', radius, decomposition);
    mask = imclose(mask, se);
end

end
33+
Lines changed: 43 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,43 @@
1+
function [mask, cuboid] = convertROItoMask5Ch(Img5ch, maskSize, ROI, RangeBuffer, bgRange)
% convertROItoMask5Ch Convert a 2D ROI on a 5-channel image to a mask and cuboid.
%
% Improved version of the ROI-to-mask conversion. Uses the same "mode"
% range-estimation algorithm but also includes background removal, ground
% segmentation using SMRF, point cloud denoising, point cloud clustering,
% and cuboid fitting, so the 3D cuboid ground truth is produced on top of
% the segmentation mask of the vehicle.
%
% Inputs:
%   Img5ch      - 5-channel image; channels 1:3 are x,y,z, channel 5 is range.
%   maskSize    - size of the output mask ([rows cols] or scalar for square).
%   ROI         - [x y w h] bounding box.
%   RangeBuffer - half-width of the accepted range band.
%   bgRange     - ranges at or beyond this value are treated as background.
%
% Outputs:
%   mask        - logical segmentation mask of the vehicle.
%   cuboid      - cuboid model fitted to the remaining points.

% Allow a scalar maskSize (false(n) is n-by-n) while keeping [rows cols] support.
if isscalar(maskSize)
    maskSize = [maskSize maskSize];
end

% Select the range channel from the 5-channel image
RangeImg = Img5ch(:,:,5);
inROI = false(maskSize);
ROI = round(ROI);
% Mark pixels inside the provided ROI, clamped to the mask bounds.
% (The original hard-coded 128 here despite receiving maskSize, and did
% not clamp the lower edge against a rounded origin of 0.)
r1 = max(1, ROI(2));
r2 = min(maskSize(1), ROI(2) + ROI(4));
c1 = max(1, ROI(1));
c2 = min(maskSize(2), ROI(1) + ROI(3));
inROI(r1:r2, c1:c2) = true;

% Build a histogram of range values within the ROI below the background
% range (quantized to 10-unit bins) and estimate the dominant range.
rangeWindow = fix(RangeImg(inROI & (RangeImg < bgRange))./10).*10;
midRange = mode(rangeWindow, "all");

% Create a band-pass mask around the dominant range
inRange = (RangeImg > midRange - RangeBuffer) & (RangeImg < midRange + RangeBuffer);
mask = (inRange & inROI);

XYZ = Img5ch(:,:,1:3);
pCloud = pointCloud(XYZ);
% Segment ground points and exclude them from the mask
groundPtsIdx = segmentGroundSMRF(pCloud, ElevationThreshold=0.001);
mask(groundPtsIdx) = false;

if nnz(mask) ~= 0
    goodIdx = find(mask);
    pCloudCrop = select(pCloud, goodIdx);
    % Remove sparse outliers from the cropped point cloud
    [~, ~, outlierIndices] = pcdenoise(pCloudCrop, "NumNeighbors", 10, "Threshold", 2);
    mask(goodIdx(outlierIndices)) = false;

    goodIdx = find(mask);
    pCloudCrop = select(pCloud, goodIdx);
    % Segment remaining points by distance and keep only the dominant
    % cluster; everything else is treated as noise.
    labels = pcsegdist(pCloudCrop, 5);
    outlierIndices = find(labels ~= mode(labels));
    mask(goodIdx(outlierIndices)) = false;
end

goodIdx = find(mask);
pCloudCrop = select(pCloud, goodIdx);
% Fit a cuboid to the filtered point cloud
cuboid = pcfitcuboid(pCloudCrop);
end
Lines changed: 38 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,38 @@
1+
function generateFiveChanImages(dataFolder, imagesFolder)
% generateFiveChanImages Build the 5-channel images used to train SalsaNext.
%
% Reads every PNG in dataFolder, rescales the range (channel 1) and
% intensity (channel 3) data, marks out-of-band range samples as NaN,
% computes Cartesian coordinates, and saves each frame to imagesFolder as
% a .mat file holding the 5-channel array [x, y, z, intensity, range].

% Make sure the destination folder exists before writing.
if ~exist(imagesFolder, 'dir')
    mkdir(imagesFolder);
end

pngFiles = dir(fullfile(dataFolder, '*.png'));
nFrames = size(pngFiles, 1);

% Validity bounds / scale factors for the normalized input channels.
lowNoise  = 1;
highNoise = 1500;
highInt   = 3200;

for k = 1:nFrames
    % Load the frame and rescale range and intensity to sensor units.
    frame = im2double(imread(fullfile(dataFolder, pngFiles(k).name)));
    Range = frame(:,:,1) .* highNoise;
    Intensity = frame(:,:,3) .* highInt;

    % Flag out-of-band range samples and blank them in both channels.
    invalid = Range < lowNoise | Range > highNoise;
    Range(invalid) = NaN;
    Intensity(invalid) = NaN;

    % Project the range image to Cartesian coordinates.
    XYZ = calculateXYZ(Range);

    % Assemble the 5 channels: x, y, z, intensity, range.
    Img5ch = zeros([size(frame, [1, 2]), 5]);
    Img5ch(:,:,1:3) = XYZ;
    Img5ch(:,:,4) = Intensity;
    Img5ch(:,:,5) = Range;

    % Save alongside the source name, with the .png extension swapped
    % for .mat. The variable name 'Img5ch' is the on-disk contract.
    outFile = fullfile(imagesFolder, [pngFiles(k).name(1:end-4), '.mat']);
    save(outFile, 'Img5ch');
end
end

‎README.md‎

Lines changed: 110 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,110 @@
1+
# Flash LiDAR Vehicle Detection with MATLAB Dataset and Deep Learning Benchmarks
2+
3+
![Flash Lidar Vehicle Tracking](FlashLidarGIF.gif)
4+
5+
## Point Cloud Processing, Dataset Download, Deep Learning Models
6+
[![View Flash LiDAR Vehicle Detection on File Exchange](https://www.mathworks.com/matlabcentral/images/matlab-file-exchange.svg)](https://www.mathworks.com/matlabcentral/fileexchange/182842-flash-lidar-vehicle-detection)
7+
8+
This repository provides code and workflows to test several state-of-the-art vehicle detection deep learning algorithms —including YOLOX, SalsaNext, and RandLA-Net— on a Flash Lidar dataset. The models are applied to 2D, 3D, and 5-channel data, supporting comprehensive benchmarking and research in autonomous vehicle perception. Also included are some point cloud processing techniques applied to the Flash Lidar to perform operations such as converting the raw data to images and point clouds, converting the ground truth bounding boxes to 2D/3D segmentation masks and cuboids, and more.
9+
10+
## Dataset Description
11+
This repository uses a Public Domain dataset collected specifically for benchmarking vehicle detection algorithms using a Flash LiDAR sensor. The dataset consists of 35 sequences featuring eight different vehicles—including seven four-wheel vehicles and a small plane. Each sequence was captured using a 128x128 Flash LiDAR sensor, recording range and intensity information for every detected point.
12+
13+
The dataset is available in four distinct formats to support a variety of deep learning models:
14+
15+
### 2D Image Format:
16+
17+
Each image is a uint16 3 channel image, where range data is mapped to the 'red' channel and intensity data to the 'blue' channel. This format is recommended to test 2D object detection models such as YOLOX and other 2D segmentation models. Included with the images is the ground truth in form of bounding boxes and segmentation masks.
18+
19+
### Point Cloud Format:
20+
21+
Data is provided as PCD files, retaining both the spatial location and intensity for each point.
22+
Ideal for 3D detection and segmentation models like RandLA-Net. Included with the point clouds is the ground truth in form of cuboids and segmentation masks.
23+
24+
25+
### Multi-Channel Image Format:
26+
27+
Each 128x128 image contains five channels: X, Y, Z, Range, and Intensity.
28+
Used for multi-channel models such as SalsaNext. Included with the images is the ground truth in form of bounding boxes, cuboids, and segmentation masks.
29+
30+
31+
### Raw Format:
32+
33+
Two arrays of size 128x128xnFrames for each sequence, representing the unfiltered and unprocessed range and intensity recorded directly from the sensor. Included with the raw data is the ground truth in form of bounding boxes, cuboids, and segmentation masks.
34+
35+
36+
All data has been denoised by removing points beyond the 99th percentile of the standard deviation, and values are normalized and scaled to maximize detail in each format (e.g., stretching uint16 values to the full 0–2<sup>16</sup> range).
37+
38+
39+
## Setup
40+
To Run:
41+
1. Download the Flash Lidar dataset from the links below in the format that works with the model that you want to test.
42+
2. Clone this repository and open it in MATLAB®.
43+
3. Extract the data and store it in the "Data\" directory of the repo.
44+
4. Follow the installation instructions below to set up required products and dependencies.
45+
5. Run the example scripts according to the model that you want to test.
46+
47+
### Flash Lidar Dataset Download Links
48+
49+
2D Data (PNG images + bounding box + mask ground truth)
50+
Download 2D Data (https://ssd.mathworks.com/supportfiles/lidar/data/FlashLidar/Data_2D.tar)
51+
52+
3D Data (PCD Point cloud + cuboids + mask ground truth)
53+
Download 3D Data (https://ssd.mathworks.com/supportfiles/lidar/data/FlashLidar/Data_3D.tar)
54+
55+
5-Channel Data (MAT 5-channel images [x, y, z, range, intensity] + bounding box + cuboids + mask ground truth)
56+
Download 5-Channel Data (https://ssd.mathworks.com/supportfiles/lidar/data/FlashLidar/Data_5Ch.tar)
57+
58+
Raw Data (MAT arrays [range, intensity] + bounding box + cuboids + mask ground truth)
59+
Download Raw Data (https://ssd.mathworks.com/supportfiles/lidar/data/FlashLidar/Data_Raw.tar)
60+
61+
Note that this dataset is very large and it can take several minutes to hours, depending on internet speed, to download the compressed folders using the links above.
62+
63+
### Additional information about set up
64+
In order to run the examples in this repo you will need to install the MATLAB® products listed below.
65+
Additionally you will need to install the support package for each deep learning model that you want to train or test using the provided dataset.
66+
67+
Note that to run and train the deep learning models it is recommended to use a computer with a dedicated GPU.
68+
69+
70+
### MathWorks Products (https://www.mathworks.com)
71+
72+
- MATLAB®
73+
- Deep Learning Toolbox™
74+
- Computer Vision Toolbox™
75+
- Lidar Toolbox™
76+
- Automated Visual Inspection Library™
77+
- Parallel Computing Toolbox™ (Recommended)
78+
79+
Requires MATLAB release R2024a or newer
80+
81+
82+
## Getting Started
83+
84+
This repository is organized to help you efficiently benchmark vehicle detection algorithms on Flash LiDAR data. Here’s how to navigate the folders and which dataset format to download for each deep learning workflow:
85+
86+
"YOLOX_ObjectDetection/"
87+
Contains code, scripts, and configurations for running the YOLOX 2D detection model.
88+
Required data: Download the 2D Image Format dataset (range and intensity as uint16x3 images).
89+
90+
"SalsaNext_SemanticSegmentation/"
91+
Includes all necessary code and scripts for the SalsaNext segmentation model, which operates on 5-channel LiDAR images.
92+
Required data: Download the 5-Channel Multi-Channel Image Format dataset (128x128 images with X, Y, Z, Range, Intensity channels).
93+
94+
"RandLANet_SemanticSegmentation/"
95+
Provides code and scripts for the RandLA-Net segmentation model, designed for processing point cloud (PCD) data.
96+
Required data: Download the Point Cloud Format dataset (point cloud files with location and intensity information).
97+
98+
"PreProcessingScripts/"
99+
Offers utilities and functions for general point cloud processing tasks such as filtering and visualization. These are the scripts that we used to convert the original raw data to the other formats downloadable in the repo. If you want to make modifications on how the data was processed you can use these as a starting point.
100+
Required data: Download the Raw Data Format dataset (Array including the recorded range and intensity).
101+
102+
## License
103+
104+
The license for the scripts is available in the License.txt file in this GitHub repository.
105+
The license for the dataset is included in the compressed folder you can download from the links above in a separate License.txt file.
106+
107+
## Community Support
108+
[MATLAB Central](https://www.mathworks.com/matlabcentral)
109+
110+
Copyright 2025 The MathWorks, Inc.
Lines changed: 22 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,22 @@
1+
function cmap = lidarColorMap()
% lidarColorMap Colormap for the Pandaset classes, one RGB row per class.
%
% Output:
%   cmap - 13x3 double, values normalized to [0,1], rows in class order.

% Copyright 2025 The MathWorks, Inc

% RGB triplets on the 0-255 scale, one row per class.
rgb255 = [ ...
    30,  30,  30;   % UnClassified
    0,  255,   0;   % Vegetation
    255, 150, 255;  % Ground
    255,   0, 255;  % Road
    255,   0,   0;  % Road Markings
    90,  30, 150;   % Side Walk
    245, 150, 100;  % Car
    250,  80, 100;  % Truck
    150,  60,  30;  % Other Vehicle
    255, 255,   0;  % Pedestrian
    0,  200, 255;   % Road Barriers
    170, 100, 150;  % Signs
    30,  30, 255];  % Building

% Normalize to [0,1] for use with MATLAB graphics.
cmap = rgb255 ./ 255;

end

‎RandLANet_SemanticSegmentation/.gitkeep‎

Whitespace-only changes.

0 commit comments

Comments
 (0)