
Commit 3512955

Merge pull request #325 from Visual-Behavior/bv0.5.0-beta
Bv0.5.0 beta
2 parents 5ee23a3 + 033f45c commit 3512955


61 files changed (+1,353 −432 lines)

Dockerfile

Lines changed: 23 additions & 3 deletions
@@ -1,17 +1,37 @@
 # tagged aloception-oss:cuda-11.3.1-pytorch1.10.1-lightning1.4.1
+
 FROM nvidia/cuda:11.3.1-cudnn8-devel-ubuntu20.04
+#FROM nvidia/cuda:11.6.0-cudnn8-devel-ubuntu20.04
+
+ARG py=3.9
+ARG pytorch=1.13.1
+ARG torchvision=0.14.1
+ARG torchaudio=0.13.1
+ARG pytorch_lightning=1.9.0
+ARG pycuda=11.7
+
 ENV TZ=Europe/Paris
 ENV DEBIAN_FRONTEND=noninteractive
+
 RUN apt-get update
 RUN apt-get install -y build-essential nano git wget libgl1-mesa-glx
+
 # Useful for scipy
 RUN apt-get install -y gfortran
+# Required for aloscene
+RUN apt-get install -y libglib2.0-0
+
+
 RUN wget https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh
 RUN bash Miniconda3-latest-Linux-x86_64.sh -b -p /miniconda
 ENV PATH=$PATH:/miniconda/condabin:/miniconda/bin
 RUN /bin/bash -c "source activate base"
 ENV HOME /workspace
 WORKDIR /workspace
-RUN conda install python=3.9 pytorch==1.10.1 torchvision==0.11.2 torchaudio==0.10.1 cudatoolkit=11.3 opencv=4.5.3 -c pytorch -c conda-forge
-COPY requirements.txt /install/requirements.txt
-RUN pip install -r /install/requirements.txt
+
+# PyTorch & PyTorch Lightning
+RUN conda install pytorch==${pytorch} torchvision==${torchvision} torchaudio==${torchaudio} pytorch-cuda=${pycuda} -c pytorch -c nvidia
+RUN pip install pytorch_lightning==${pytorch_lightning}
+
+COPY requirements-torch1.13.1.txt /install/requirements-torch1.13.1.txt
+RUN pip install -r /install/requirements-torch1.13.1.txt
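
The Dockerfile now pins the Python/PyTorch stack through build args. A minimal sanity check, not part of the commit, that one might run inside the built container to confirm the pinned versions were installed (expected values come from the build args above):

```python
# Sketch: verify the pinned versions inside a running aloception-oss container.
import torch
import torchvision
import pytorch_lightning as pl

print("torch:", torch.__version__)              # expected 1.13.1
print("torchvision:", torchvision.__version__)  # expected 0.14.1
print("lightning:", pl.__version__)             # expected 1.9.0
print("CUDA available:", torch.cuda.is_available())
```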

README.md

Lines changed: 23 additions & 3 deletions
@@ -1,10 +1,10 @@
 <p align="center">
-  <img src="images/aloception.png" style="text-align:center; width: 50%;" alt="Logo aloception" />
+  <img src="images/aloception-oss.jpg" style="text-align:center; width: 50%;" alt="Logo aloception" />
 </p>

 <a href="https://visual-behavior.github.io/aloception-oss/">Documentation</a>

-[![Conventional Commits](https://img.shields.io/badge/Conventional%20Commits-0.3.0-green.svg)](https://conventionalcommits.org)
+[![Conventional Commits](https://img.shields.io/badge/Conventional%20Commits-0.5.0-green.svg)](https://conventionalcommits.org)

 # Aloception open source software

@@ -75,6 +75,26 @@ training pipelines with **augmented tensors**.

 ## Installation

+### Docker install
+
+```
+docker build -t aloception-oss:cuda-11.3.1-pytorch1.13.1-lightning1.9.0 .
+```
+
+```
+docker run --gpus all -it -v /YOUR/WORKSPACE/:/workspace --privileged -e DISPLAY=$DISPLAY -v /tmp/.X11-unix:/tmp/.X11-unix aloception-oss:cuda-11.3.1-pytorch1.13.1-lightning1.9.0
+```
+
+Or, without building the image:
+
+```
+docker run --gpus all -it -v /YOUR/WORKSPACE/:/workspace --privileged -e DISPLAY=$DISPLAY -v /tmp/.X11-unix:/tmp/.X11-unix visualbehaviorofficial/aloception-oss:cuda-11.3.1-pytorch1.13.1-lightning1.9.0
+```
+
+### Pip install
+
 You first need to install PyTorch 1.10.1 based on your hardware and environment
 configuration. Please refer to the [pytorch website](https://pytorch.org/get-started/locally/) for this installation.

@@ -90,7 +110,7 @@ Alternatively, you can clone the repository and use:
 pip install -e aloception-oss/
 ```

-Or setup the repo yourself in your env and install the dependencies
+Or set up the repo yourself in your env and install the dependencies:

 ```sh
 pip install -r requirements.txt

alodataset/__init__.py

Lines changed: 1 addition & 0 deletions
@@ -14,5 +14,6 @@
 from .sintel_flow_dataset import SintelFlowDataset
 from .sintel_disparity_dataset import SintelDisparityDataset
 from .sintel_multi_dataset import SintelMultiDataset
+from .from_directory_dataset import FromDirectoryDataset
 from .woodScape_dataset import WooodScapeDataset
 from .woodScape_split_dataset import WoodScapeSplitDataset

alodataset/base_dataset.py

Lines changed: 6 additions & 5 deletions
@@ -56,7 +56,7 @@ def stream_loader(dataset, num_workers=2):
     return data_loader


-def train_loader(dataset, batch_size=1, num_workers=2, sampler=torch.utils.data.RandomSampler):
+def train_loader(dataset, batch_size=1, num_workers=2, sampler=torch.utils.data.RandomSampler, sampler_kwargs={}):
     """Get training loader from the dataset

     Parameters
@@ -69,14 +69,15 @@ def train_loader(dataset, batch_size=1, num_workers=2, sampler=torch.utils.data.
         Number of workers, by default 2
     sampler : torch.utils.data, optional
         Callback to sample the dataset, by default torch.utils.data.RandomSampler
+        Or an instance of any class inheriting from torch.utils.data.Sampler

     Returns
     -------
     torch.utils.data.DataLoader
         A generator
     """
-    sampler = sampler(dataset) if sampler is not None else None
-
+    if sampler is not None and not isinstance(sampler, torch.utils.data.Sampler):
+        sampler = sampler(dataset, **sampler_kwargs)
     data_loader = torch.utils.data.DataLoader(
         dataset,
         # batch_sampler=batch_sampler,
@@ -332,7 +333,7 @@ def stream_loader(self, num_workers=2):
         """
         return stream_loader(self, num_workers=num_workers)

-    def train_loader(self, batch_size=1, num_workers=2, sampler=torch.utils.data.RandomSampler):
+    def train_loader(self, batch_size=1, num_workers=2, sampler=torch.utils.data.RandomSampler, sampler_kwargs={}):
         """Get training loader from the dataset

         Parameters
@@ -351,7 +352,7 @@ def train_loader(self, batch_size=1, num_workers=2, sampler=torch.utils.data.Ran
         torch.utils.data.DataLoader
             A generator
         """
-        return train_loader(self, batch_size=batch_size, num_workers=num_workers, sampler=sampler)
+        return train_loader(self, batch_size=batch_size, num_workers=num_workers, sampler=sampler, sampler_kwargs=sampler_kwargs)

     def prepare(self):
         """Prepare the dataset. Not all child classes need to implement this method.

alodataset/coco_base_dataset.py

Lines changed: 23 additions & 4 deletions
@@ -6,13 +6,15 @@
 import os
 import numpy as np
 import torch
+
+from alodataset import BaseDataset
+from aloscene import BoundingBoxes2D, Frame, Labels, Mask
 from collections import defaultdict
+from pathlib import Path
 from pycocotools import mask as coco_mask
 from pycocotools.coco import COCO
 from typing import Dict, Union

-from alodataset import BaseDataset
-from aloscene import BoundingBoxes2D, Frame, Labels, Mask


 class CocoBaseDataset(BaseDataset):
@@ -78,10 +80,17 @@ def __init__(
             return
         else:
             assert img_folder is not None, "When sample = False, img_folder must be given."
-            assert ann_file is not None, "When sample = False, ann_file must be given."
+            assert ann_file is not None or "test" in img_folder, "When sample = False and the test split is not used, ann_file must be given."
+

         # Create properties
         self.img_folder = os.path.join(self.dataset_dir, img_folder)
+
+        if "test" in img_folder:
+            # Build the list of indices without relying on the annotation file
+            self.items = [int(Path(os.path.join(self.img_folder, f)).stem) for f in os.listdir(self.img_folder) if os.path.isfile(os.path.join(self.img_folder, f))]
+            return
+
         self.coco = COCO(os.path.join(self.dataset_dir, ann_file))
         self.items = list(sorted(self.coco.imgs.keys()))

@@ -231,7 +240,12 @@ def getitem(self, idx):
             return BaseDataset.__getitem__(self, idx)

         image_id = self.items[idx]
+        if "test" in self.img_folder:
+            # Get the filename from image_id without relying on the annotation file
+            return Frame(os.path.join(self.img_folder, f"{str(image_id).zfill(12)}.jpg"))
+
         frame = Frame(os.path.join(self.img_folder, self.coco.loadImgs(image_id)[0]["file_name"]))
+
         target = self.coco.loadAnns(self.coco.getAnnIds(image_id))
         target = {"image_id": image_id, "annotations": target}
         _, target = self.prepare(frame, target)
@@ -341,7 +355,12 @@ def __call__(self, image, target):


 if __name__ == "__main__":
-    coco_dataset = CocoBaseDataset(sample=True)
+    coco_dataset = CocoBaseDataset(sample=False, img_folder="test2017")
+    # Check that the regular getitem works
+    frame = coco_dataset[0]
+    frame.get_view().render()
+
+    # Check that the dataloader works
     for f, frames in enumerate(coco_dataset.train_loader(batch_size=2)):
         frames = Frame.batch_list(frames)
         frames.get_view().render()
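
The annotation-free branch works because COCO `test2017` files are named by their zero-padded 12-digit image id, so the id recovered from the filename in `__init__` can be turned back into a path in `getitem`. A tiny sketch of that round trip (the filename is just an example):

```python
import os
from pathlib import Path

filename = "000000000139.jpg"               # example COCO-style file name
image_id = int(Path(filename).stem)         # 139, as stored in self.items by __init__
rebuilt = f"{str(image_id).zfill(12)}.jpg"  # "000000000139.jpg", as rebuilt by getitem
print(os.path.join("test2017", rebuilt))
```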

alodataset/crowd_human_dataset.py

Lines changed: 24 additions & 3 deletions
@@ -36,7 +36,18 @@ def __init__(
             return
         else:
             assert img_folder is not None, "When sample = False, img_folder must be given."
-            assert ann_file is not None, "When sample = False, ann_file must be given."
+            assert ann_file is not None or "test" in img_folder, "When sample = False and the test split is not used, ann_file must be given."
+
+        if "test" in img_folder:
+            self._img_folder = img_folder
+            self.img_folder = os.path.join(self.dataset_dir, img_folder, "images_test")
+
+            self.items = []
+            for f in os.listdir(self.img_folder):
+                if os.path.isfile(os.path.join(self.img_folder, f)):
+                    self.items.append({"ID": Path(os.path.join(self.img_folder, f)).stem})
+
+            return

         assert type(img_folder) == type(ann_file), "img_folder & ann_file must be the same type."

@@ -121,9 +132,14 @@ def getitem(self, idx):
             return BaseDataset.__getitem__(self, idx)

         record = self.items[idx]
-        ann_id = record["ann_id"]
         image_id = record["ID"]

+        if "test" in self.img_folder:
+            # Get the filename from image_id without relying on the annotation file
+            return Frame(os.path.join(self.img_folder, image_id + ".jpg"))
+
+        ann_id = record["ann_id"]
+
         image_path = os.path.join(self.img_folder[ann_id], image_id + ".jpg")

         frame = Frame(image_path)
@@ -272,6 +288,9 @@ def prepare(self):
         if self.sample is not None and self.sample is not False:  # Nothing to do. Samples are ready
             return

+        if "test" in self.img_folder:
+            return  # The code for preparing test datasets exists, but it is not used for now
+
         if self.dataset_dir.endswith("_prepared") and not os.path.exists(self.dataset_dir.replace("_prepared", "")):
             return

@@ -294,7 +313,9 @@

 def main():
     """Main"""
-    crowd_human_dataset = CrowdHumanDataset(img_folder="CrowdHuman_train", ann_file="annotation_train.odgt")
+    crowd_human_dataset = CrowdHumanDataset(img_folder="CrowdHuman_test")
+    frame = crowd_human_dataset[0]
+    frame.get_view().render()

     crowd_human_dataset.prepare()
     for i, frames in enumerate(crowd_human_dataset.train_loader(batch_size=2, sampler=None, num_workers=0)):
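
Mirroring the COCO change, a short sketch of iterating the annotation-free CrowdHuman test split in batches; it assumes `CrowdHumanDataset` is exported from `alodataset` like the other datasets and that the folder layout matches the commit:

```python
from alodataset import CrowdHumanDataset
from aloscene import Frame

# "test" in img_folder triggers the new annotation-free branch.
dataset = CrowdHumanDataset(img_folder="CrowdHuman_test")

for i, frames in enumerate(dataset.train_loader(batch_size=2, sampler=None, num_workers=0)):
    frames = Frame.batch_list(frames)  # merge the list of frames into a single batched Frame
    frames.get_view().render()
    if i >= 1:                         # only look at the first couple of batches
        break
```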
