65 changes: 41 additions & 24 deletions deeplabcut/post_processing/analyze_skeleton.py
@@ -177,6 +177,7 @@ def analyzeskeleton(
destfolder=None,
modelprefix="",
track_method="",
return_data=False,
):
"""Extracts length and orientation of each "bone" of the skeleton.

@@ -228,15 +229,25 @@
For multiple animals, must be either 'box', 'skeleton', or 'ellipse' and will
be taken from the config.yaml file if none is given.

return_data: bool, optional, default=False
If True, returns a dictionary of the skeleton dataframes keyed by video paths.

Returns
-------
None
video_to_skeleton_df
Dictionary mapping video filepaths to skeleton dataframes. Only returned
when return_data is True.

* If no videos exist, the dictionary will be empty.
* If a video is not analyzed, the corresponding value in the dictionary will be
None.
"""
# Load config file, scorer and videos
cfg = auxiliaryfunctions.read_config(config)
if not cfg["skeleton"]:
raise ValueError("No skeleton defined in the config.yaml.")

video_to_skeleton_df = {}

track_method = auxfun_multianimal.get_track_method(cfg, track_method=track_method)
DLCscorer, DLCscorerlegacy = auxiliaryfunctions.GetScorerName(
cfg,
@@ -256,33 +267,39 @@
df, filepath, scorer, _ = auxiliaryfunctions.load_analyzed_data(
destfolder, vname, DLCscorer, filtered, track_method
)
output_name = filepath.replace(".h5", f"_skeleton.h5")
if os.path.isfile(output_name):
print(f"Skeleton in video {vname} already processed. Skipping...")
continue

bones = {}
if "individuals" in df.columns.names:
for animal_name, df_ in df.groupby(level="individuals", axis=1):
temp = df_.droplevel(["scorer", "individuals"], axis=1)
if animal_name != "single":
for bp1, bp2 in cfg["skeleton"]:
name = "{}_{}_{}".format(animal_name, bp1, bp2)
bones[name] = analyzebone(temp[bp1], temp[bp2])
else:
for bp1, bp2 in cfg["skeleton"]:
name = "{}_{}".format(bp1, bp2)
bones[name] = analyzebone(df[scorer][bp1], df[scorer][bp2])

skeleton = pd.concat(bones, axis=1)
skeleton.to_hdf(output_name, "df_with_missing", format="table", mode="w")
if save_as_csv:
skeleton.to_csv(output_name.replace(".h5", ".csv"))

except FileNotFoundError as e:
print(e)
video_to_skeleton_df[video] = None
continue

output_name = filepath.replace(".h5", f"_skeleton.h5")
if os.path.isfile(output_name):
print(f"Skeleton in video {vname} already processed. Skipping...")
video_to_skeleton_df[video] = pd.read_hdf(output_name, "df_with_missing")
continue

bones = {}
if "individuals" in df.columns.names:
for animal_name, df_ in df.groupby(level="individuals", axis=1):
temp = df_.droplevel(["scorer", "individuals"], axis=1)
if animal_name != "single":
for bp1, bp2 in cfg["skeleton"]:
name = "{}_{}_{}".format(animal_name, bp1, bp2)
bones[name] = analyzebone(temp[bp1], temp[bp2])
else:
for bp1, bp2 in cfg["skeleton"]:
name = "{}_{}".format(bp1, bp2)
bones[name] = analyzebone(df[scorer][bp1], df[scorer][bp2])

skeleton = pd.concat(bones, axis=1)
video_to_skeleton_df[video] = skeleton
skeleton.to_hdf(output_name, "df_with_missing", format="table", mode="w")
if save_as_csv:
skeleton.to_csv(output_name.replace(".h5", ".csv"))

if return_data:
return video_to_skeleton_df


if __name__ == "__main__":
parser = argparse.ArgumentParser()
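The skeleton-building loop above delegates the per-bone math to analyzebone, whose body is outside this diff. As a rough sketch of what the docstring promises (per-frame length and orientation from paired x/y columns), the computation could look like the one below; the column names and the likelihood convention are assumptions for illustration, not DeepLabCut's actual implementation.

import numpy as np
import pandas as pd

def bone_length_and_orientation(bp1: pd.DataFrame, bp2: pd.DataFrame) -> pd.DataFrame:
    # Illustrative stand-in for analyzebone: per-frame Euclidean length and
    # angle of the segment joining two bodyparts, each given as a frame with
    # 'x', 'y', and 'likelihood' columns (assumed layout).
    dx = bp2["x"] - bp1["x"]
    dy = bp2["y"] - bp1["y"]
    return pd.DataFrame(
        {
            "length": np.hypot(dx, dy),  # bone length in pixels
            "orientation": np.degrees(np.arctan2(dy, dx)),  # angle vs. x-axis
            # A conservative confidence: the weaker of the two keypoints.
            "likelihood": np.minimum(bp1["likelihood"], bp2["likelihood"]),
        },
        index=bp1.index,
    )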
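A hedged sketch of how the new return_data flag could be used from the Python API; the config and video paths below are placeholders, not part of this PR.

import deeplabcut

# Placeholder paths for illustration; substitute your own project.
config = "/path/to/project/config.yaml"
videos = ["/path/to/videos/session1.mp4"]

# With return_data=True, the call also returns its results in memory,
# keyed by video path, instead of only writing *_skeleton.h5 files.
skeletons = deeplabcut.analyzeskeleton(config, videos, return_data=True)

for video_path, df in skeletons.items():
    if df is None:
        print(f"{video_path}: no analyzed data found, skipped")
    else:
        print(f"{video_path}: skeleton dataframe of shape {df.shape}")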
147 changes: 86 additions & 61 deletions deeplabcut/post_processing/filtering.py
@@ -80,6 +80,7 @@ def filterpredictions(
destfolder=None,
modelprefix="",
track_method="",
return_data=False,
):
"""Fits frame-by-frame pose predictions.

@@ -146,9 +147,17 @@
For multiple animals, must be either 'box', 'skeleton', or 'ellipse' and will
be taken from the config.yaml file if none is given.

return_data: bool, optional, default=False
If True, returns a dictionary of the filtered data keyed by video paths.

Returns
-------
None
video_to_filtered_df
Dictionary mapping video filepaths to filtered dataframes. Only returned
when return_data is True.

* If no videos exist, the dictionary will be empty.
* If a video is not analyzed, the corresponding value in the dictionary will be
None.

Examples
--------
@@ -200,9 +209,12 @@
)
Videos = auxiliaryfunctions.get_list_of_videos(video, videotype)

video_to_filtered_df = {}

if not len(Videos):
print("No video(s) were found. Please check your paths and/or 'videotype'.")
return
if return_data:
return video_to_filtered_df

for video in Videos:
if destfolder is None:
@@ -212,69 +224,82 @@
vname = Path(video).stem

try:
_ = auxiliaryfunctions.load_analyzed_data(
df, filepath, _, _ = auxiliaryfunctions.load_analyzed_data(
destfolder, vname, DLCscorer, True, track_method
)
print(f"Data from {vname} were already filtered. Skipping...")
except FileNotFoundError: # Data haven't been filtered yet
try:
df, filepath, _, _ = auxiliaryfunctions.load_analyzed_data(
destfolder, vname, DLCscorer, track_method=track_method
video_to_filtered_df[video] = df
# Data has already been filtered, so continue to the next video
continue
except FileNotFoundError:
pass

# Data haven't been filtered yet
try:
df, filepath, _, _ = auxiliaryfunctions.load_analyzed_data(
destfolder, vname, DLCscorer, track_method=track_method
)
except FileNotFoundError as e:
video_to_filtered_df[video] = None
print(e)
continue

nrows = df.shape[0]
if filtertype == "arima":
temp = df.values.reshape((nrows, -1, 3))
placeholder = np.empty_like(temp)
for i in range(temp.shape[1]):
x, y, p = temp[:, i].T
meanx, _ = FitSARIMAXModel(
x, p, p_bound, alpha, ARdegree, MAdegree, False
)
meany, _ = FitSARIMAXModel(
y, p, p_bound, alpha, ARdegree, MAdegree, False
)
nrows = df.shape[0]
if filtertype == "arima":
temp = df.values.reshape((nrows, -1, 3))
placeholder = np.empty_like(temp)
for i in range(temp.shape[1]):
x, y, p = temp[:, i].T
meanx, _ = FitSARIMAXModel(
x, p, p_bound, alpha, ARdegree, MAdegree, False
)
meany, _ = FitSARIMAXModel(
y, p, p_bound, alpha, ARdegree, MAdegree, False
)
meanx[0] = x[0]
meany[0] = y[0]
placeholder[:, i] = np.c_[meanx, meany, p]
data = pd.DataFrame(
placeholder.reshape((nrows, -1)),
columns=df.columns,
index=df.index,
)
elif filtertype == "median":
data = df.copy()
mask = data.columns.get_level_values("coords") != "likelihood"
data.loc[:, mask] = df.loc[:, mask].apply(
signal.medfilt, args=(windowlength,), axis=0
)
elif filtertype == "spline":
data = df.copy()
mask_data = data.columns.get_level_values("coords").isin(("x", "y"))
xy = data.loc[:, mask_data].values
prob = data.loc[:, ~mask_data].values
missing = np.isnan(xy)
xy_filled = columnwise_spline_interp(xy, windowlength)
filled = ~np.isnan(xy_filled)
xy[filled] = xy_filled[filled]
inds = np.argwhere(missing & filled)
if inds.size:
# Retrieve original individual label indices
inds[:, 1] //= 2
inds = np.unique(inds, axis=0)
prob[inds[:, 0], inds[:, 1]] = 0.01
data.loc[:, ~mask_data] = prob
data.loc[:, mask_data] = xy
else:
raise ValueError(f"Unknown filter type {filtertype}")

outdataname = filepath.replace(".h5", "_filtered.h5")
data.to_hdf(outdataname, "df_with_missing", format="table", mode="w")
if save_as_csv:
print("Saving filtered csv poses!")
data.to_csv(outdataname.split(".h5")[0] + ".csv")
except FileNotFoundError as e:
print(e)
continue
meanx[0] = x[0]
meany[0] = y[0]
placeholder[:, i] = np.c_[meanx, meany, p]
data = pd.DataFrame(
placeholder.reshape((nrows, -1)),
columns=df.columns,
index=df.index,
)
elif filtertype == "median":
data = df.copy()
mask = data.columns.get_level_values("coords") != "likelihood"
data.loc[:, mask] = df.loc[:, mask].apply(
signal.medfilt, args=(windowlength,), axis=0
)
elif filtertype == "spline":
data = df.copy()
mask_data = data.columns.get_level_values("coords").isin(("x", "y"))
xy = data.loc[:, mask_data].values
prob = data.loc[:, ~mask_data].values
missing = np.isnan(xy)
xy_filled = columnwise_spline_interp(xy, windowlength)
filled = ~np.isnan(xy_filled)
xy[filled] = xy_filled[filled]
inds = np.argwhere(missing & filled)
if inds.size:
# Retrieve original individual label indices
inds[:, 1] //= 2
inds = np.unique(inds, axis=0)
prob[inds[:, 0], inds[:, 1]] = 0.01
data.loc[:, ~mask_data] = prob
data.loc[:, mask_data] = xy
else:
raise ValueError(f"Unknown filter type {filtertype}")

video_to_filtered_df[video] = data

outdataname = filepath.replace(".h5", "_filtered.h5")
data.to_hdf(outdataname, "df_with_missing", format="table", mode="w")
if save_as_csv:
print("Saving filtered csv poses!")
data.to_csv(outdataname.split(".h5")[0] + ".csv")

if return_data:
return video_to_filtered_df


if __name__ == "__main__":
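And the analogous sketch for filterpredictions, again with placeholder paths; windowlength must be odd because the median branch uses scipy.signal.medfilt.

import deeplabcut

# Placeholder paths for illustration.
config = "/path/to/project/config.yaml"
videos = ["/path/to/videos/session1.mp4"]

# Median-filter the pose estimates and collect the results in memory.
filtered = deeplabcut.filterpredictions(
    config,
    videos,
    filtertype="median",
    windowlength=5,  # must be odd for scipy.signal.medfilt
    save_as_csv=False,
    return_data=True,
)

# Videos whose predictions were never computed map to None rather than raising.
for video_path, df in filtered.items():
    status = "not analyzed" if df is None else f"{len(df)} filtered frames"
    print(f"{video_path}: {status}")

One design choice worth noting in the spline branch of the diff: coordinates that were missing and then filled by interpolation get their likelihood overwritten with 0.01, so downstream plotting and outlier-detection steps can still distinguish interpolated points from genuinely tracked ones.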