Commit 2fdfdaf

from transID to master (#1) (#1764)
Added support for transformer-based ReID
1 parent c43e952 commit 2fdfdaf

52 files changed: 2919 additions & 49 deletions


.gitignore

Lines changed: 1 addition & 1 deletion
@@ -9,7 +9,7 @@
 .local
 .DS_Store
 examples/.DS_Store
-
+*~
 # Tensorflow checkpoints
 *.ckpt
 snapshot-*

deeplabcut/__init__.py

Lines changed: 4 additions & 0 deletions
@@ -85,6 +85,7 @@
     evaluate_network,
     return_evaluate_network_data,
     analyze_videos,
+    create_tracking_dataset,
     analyze_time_lapse_frames,
     convert_detections2tracklets,
     extract_maps,
@@ -95,6 +96,7 @@
     export_model,
 )
 
+
 from deeplabcut.pose_estimation_3d import (
     calibrate_cameras,
     check_undistortion,
@@ -111,4 +113,6 @@
 from deeplabcut.post_processing import filterpredictions, analyzeskeleton
 
 
+
+
 from deeplabcut.version import __version__, VERSION
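
With this hunk, create_tracking_dataset becomes importable from the top-level deeplabcut namespace. A minimal sketch of the intended call, assuming the usual DeepLabCut convention of passing the project config path and a list of videos (the function's actual signature lives in the tracking module, which this commit page does not show, so the arguments below are assumptions):

import deeplabcut

# Hypothetical project paths, for illustration only.
config_path = "/home/user/demo-project/config.yaml"
videos = ["/home/user/demo-project/videos/session1.mp4"]

# Newly re-exported at package level by this commit; it assembles the
# dataset used to train the transformer-based ReID tracker.
deeplabcut.create_tracking_dataset(config_path, videos)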

deeplabcut/create_project/new.py

Lines changed: 1 addition & 1 deletion
@@ -92,7 +92,7 @@ def create_new_project(
     # Create project and sub-directories
     if not DEBUG and project_path.exists():
         print('Project "{}" already exists!'.format(project_path))
-        return
+        return os.path.join(str(project_path), "config.yaml")
     video_path = project_path / "videos"
     data_path = project_path / "labeled-data"
     shuffles_path = project_path / "training-datasets"
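
The behavioural change above: create_new_project previously returned None when the target directory already existed, so callers could not tell "project created" from "project already there" without re-deriving the path themselves. Now the config path comes back in that branch too. A short sketch, assuming the standard create_new_project(project, experimenter, videos) calling convention and that the successful-creation path already returned the config path, which this change mirrors:

import deeplabcut

# The video path is a placeholder. Whether the project is fresh or
# already on disk, the return value is now the path to its config.yaml.
config_path = deeplabcut.create_new_project(
    "demo-project", "alice", ["/data/videos/session1.mp4"]
)
print(config_path)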

deeplabcut/generate_training_dataset/multiple_individuals_trainingsetmanipulation.py

Lines changed: 18 additions & 5 deletions
@@ -34,7 +34,10 @@
 )
 
 def format_multianimal_training_data(
-    df, train_inds, project_path, n_decimals=2,
+    df,
+    train_inds,
+    project_path,
+    n_decimals=2,
 ):
     train_data = []
     nrows = df.shape[0]
@@ -318,7 +321,10 @@ def create_multianimaltraining_dataset(
 
     # Make training file!
     data = format_multianimal_training_data(
-        Data, trainIndices, cfg["project_path"], numdigits,
+        Data,
+        trainIndices,
+        cfg["project_path"],
+        numdigits,
     )
 
     if len(trainIndices) > 0:
@@ -374,7 +380,10 @@ def create_multianimaltraining_dataset(
         )
         path_test_config = str(
             os.path.join(
-                cfg["project_path"], Path(modelfoldername), "test", "pose_cfg.yaml",
+                cfg["project_path"],
+                Path(modelfoldername),
+                "test",
+                "pose_cfg.yaml",
             )
         )
         path_inference_config = str(
@@ -475,7 +484,10 @@ def create_multianimaltraining_dataset(
 
 
 def convert_cropped_to_standard_dataset(
-    config_path, recreate_datasets=True, delete_crops=True, back_up=True,
+    config_path,
+    recreate_datasets=True,
+    delete_crops=True,
+    back_up=True,
 ):
     import pandas as pd
     import pickle
@@ -514,7 +526,8 @@ def convert_cropped_to_standard_dataset(
         return
 
     datasets_folder = os.path.join(
-        project_path, auxiliaryfunctions.GetTrainingSetFolder(cfg),
+        project_path,
+        auxiliaryfunctions.GetTrainingSetFolder(cfg),
     )
     df_old = pd.read_hdf(
         os.path.join(datasets_folder, "CollectedData_" + cfg["scorer"] + ".h5"),

deeplabcut/generate_training_dataset/trainingsetmanipulation.py

Lines changed: 8 additions & 8 deletions
(The eight paired removals/additions below are whitespace-only edits: the removed and added lines differ only in trailing spaces.)

@@ -245,14 +245,14 @@ def dropimagesduetolackofannotation(config):
 
 def dropunlabeledframes(config):
     """
-    Drop entries such that all the bodyparts are not labeled from the annotation files, i.e. h5 and csv files
+    Drop entries such that all the bodyparts are not labeled from the annotation files, i.e. h5 and csv files
     Will be carried out iteratively for all *folders* in labeled-data.
-
+
     Parameter
     ----------
     config : string
         String containing the full path of the config file in the project.
-
+
     """
     cfg = auxiliaryfunctions.read_config(config)
     videos = cfg["video_sets"].keys()
@@ -275,9 +275,9 @@ def dropunlabeledframes(config):
         DC.to_csv(
             os.path.join(str(folder), "CollectedData_" + cfg["scorer"] + ".csv")
         )
-
+
        print("Dropped ", dropped, "entries in ",folder)
-
+
     print("Done.")
 
 def check_labels(
@@ -747,7 +747,7 @@ def create_training_dataset(
 
     augmenter_type: string
         Type of augmenter. Currently default, imgaug, tensorpack, and deterministic are supported.
-
+
     posecfg_template: string (optional, default=None)
         Path to a pose_cfg.yaml file to use as a template for generating the new one for the current iteration. Useful if you
         would like to start with the same parameters a previous training iteration. None uses the default pose_cfg.yaml.
@@ -834,7 +834,7 @@
             "deterministic",
         ]:
             raise ValueError("Invalid augmenter type:", augmenter_type)
-
+
     if posecfg_template:
         if net_type != prior_cfg["net_type"]:
             print(
@@ -844,7 +844,7 @@
             print(
                 "WARNING: Specified augmenter_type does not match dataset_type from posecfg_template path entered. Proceed with caution."
             )
-
+
     # Loading the encoder (if necessary downloading from TF)
     dlcparent_path = auxiliaryfunctions.get_deeplabcut_path()
     if not posecfg_template:

deeplabcut/pose_estimation_tensorflow/core/evaluate_multianimal.py

Lines changed: 9 additions & 1 deletion
@@ -296,6 +296,7 @@ def evaluate_multianimal_full(
             )
 
             data_path = resultsfilename.split(".h5")[0] + "_full.pickle"
+
             if plotting:
                 foldername = os.path.join(
                     str(evaluationfolder),
@@ -308,7 +309,12 @@
             if os.path.isfile(data_path):
                 print("Model already evaluated.", resultsfilename)
             else:
-                sess, inputs, outputs = predict.setup_pose_prediction(dlc_cfg)
+
+                (
+                    sess,
+                    inputs,
+                    outputs,
+                ) = predict.setup_pose_prediction(dlc_cfg)
 
                 PredicteData = {}
                 dist = np.full((len(Data), len(all_bpts)), np.nan)
@@ -359,6 +365,7 @@
                         :, ["sample", "y", "x", "bodyparts"]
                     ].to_numpy()
                     peaks_gt[:, 1:3] = (peaks_gt[:, 1:3] - stride // 2) / stride
+
                     pred = predictma.predict_batched_peaks_and_costs(
                         dlc_cfg,
                         np.expand_dims(frame, axis=0),
@@ -367,6 +374,7 @@
                         outputs,
                         peaks_gt.astype(int),
                     )
+
                     if not pred:
                         continue
                     else:

deeplabcut/pose_estimation_tensorflow/core/predict.py

Lines changed: 9 additions & 2 deletions
@@ -28,12 +28,13 @@
 from .openvino.session import OpenVINOSession
 
 
-def setup_pose_prediction(cfg, allow_growth=False):
+def setup_pose_prediction(cfg, allow_growth=False, collect_extra=False):
     tf.compat.v1.reset_default_graph()
     inputs = tf.compat.v1.placeholder(
         tf.float32, shape=[cfg["batch_size"], None, None, 3]
     )
     net_heads = PoseNetFactory.create(cfg).test(inputs)
+    extra_dict = {}
     outputs = [net_heads["part_prob"]]
     if cfg["location_refinement"]:
         outputs.append(net_heads["locref"])
@@ -44,6 +45,9 @@ def setup_pose_prediction(cfg, allow_growth=False):
 
         outputs.append(net_heads["peak_inds"])
 
+    if collect_extra:
+        extra_dict["features"] = net_heads["features"]
+
     restorer = tf.compat.v1.train.Saver()
 
     if allow_growth:
@@ -58,7 +62,10 @@ def setup_pose_prediction(cfg, allow_growth=False):
     # Restore variables from disk.
     restorer.restore(sess, cfg["init_weights"])
 
-    return sess, inputs, outputs
+    if collect_extra:
+        return sess, inputs, outputs, extra_dict
+    else:
+        return sess, inputs, outputs
 
 
 def extract_cnn_output(outputs_np, cfg):
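
Because collect_extra changes how many values setup_pose_prediction returns, call sites must unpack according to the flag they passed. A minimal sketch, assuming dlc_cfg is an already-loaded pose config dict:

from deeplabcut.pose_estimation_tensorflow.core import predict

# Default call: behaviour and return arity are unchanged.
sess, inputs, outputs = predict.setup_pose_prediction(dlc_cfg)

# Opting in adds a fourth value: a dict exposing the backbone "features"
# tensor that the transformer ReID pipeline consumes downstream.
sess, inputs, outputs, extra_dict = predict.setup_pose_prediction(
    dlc_cfg, collect_extra=True
)

Returning tuples of different lengths behind a flag keeps existing callers working, at the cost that a caller who forgets which flag it passed gets an unpacking error rather than an obvious type error.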

deeplabcut/pose_estimation_tensorflow/core/predict_multianimal.py

Lines changed: 11 additions & 2 deletions
@@ -211,7 +211,14 @@ def predict_batched_peaks_and_costs(
     peaks_gt=None,
     n_points=10,
     n_decimals=3,
+    extra_dict=None,
 ):
+
+    if extra_dict:
+        features = sess.run(extra_dict['features'],
+            feed_dict={inputs: images_batch}
+        )
+
     scmaps, locrefs, *pafs, peaks = sess.run(outputs, feed_dict={inputs: images_batch})
     if ~np.any(peaks):
         return []
@@ -249,8 +256,10 @@
         )
         for i, costs in enumerate(costs_gt):
             preds[i]["groundtruth_costs"] = costs
-
-    return preds
+    if extra_dict:
+        return preds, features
+    else:
+        return preds
 
 
 def find_local_maxima(scmap, radius, threshold):
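
End to end, the two changes compose: the extra_dict returned by setup_pose_prediction(collect_extra=True) is threaded into predict_batched_peaks_and_costs, which then also evaluates the feature tensor and returns it alongside the predictions. A sketch, where dlc_cfg and frames_batch (an image batch matching the placeholder shape) are assumed to exist; the positional argument order follows the call site visible in the evaluate_multianimal diff above:

from deeplabcut.pose_estimation_tensorflow.core import predict
from deeplabcut.pose_estimation_tensorflow.core import predict_multianimal as predictma

sess, inputs, outputs, extra_dict = predict.setup_pose_prediction(
    dlc_cfg, collect_extra=True
)

# With extra_dict supplied, the return value becomes (preds, features);
# without it, it stays preds alone, matching the pre-commit behaviour.
preds, features = predictma.predict_batched_peaks_and_costs(
    dlc_cfg,
    frames_batch,
    sess,
    inputs,
    outputs,
    extra_dict=extra_dict,
)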

deeplabcut/pose_estimation_tensorflow/core/train.py

Lines changed: 1 addition & 1 deletion
@@ -148,7 +148,7 @@ def train(
     maxiters,
     max_to_keep=5,
     keepdeconvweights=True,
-    allow_growth=False,
+    allow_growth=True,
 ):
     start_path = os.getcwd()
     os.chdir(

deeplabcut/pose_estimation_tensorflow/core/train_multianimal.py

Lines changed: 1 addition & 1 deletion
@@ -38,7 +38,7 @@ def train(
     maxiters,
     max_to_keep=5,
     keepdeconvweights=True,
-    allow_growth=False,
+    allow_growth=True,
 ):
     start_path = os.getcwd()
     os.chdir(
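
Both trainers flip allow_growth to True by default. This is the standard TensorFlow 1.x knob for incremental GPU memory allocation: instead of reserving the entire GPU at session creation, the process grows its footprint on demand, which helps when the same GPU must be shared with other workloads. The diff above does not show where the flag is consumed; the conventional TF wiring looks like this:

import tensorflow as tf

# Standard TF1-style session config: claim GPU memory incrementally
# rather than pre-allocating the whole device.
config = tf.compat.v1.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.compat.v1.Session(config=config)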
