3ab517035d61e06e9d59bcc9b39b9710897e82c2,deeplabcut/generate_training_dataset/frame_extraction.py,,extract_frames,#Any#Any#Any#Any#Any#Any#Any#Any#Any#Any#,62

Before Change


        print("Invalid MODE. Choose either "manual" or "automatic". Check ``help(deeplabcut.extract_frames)`` on python and ``deeplabcut.extract_frames?`` \
              for ipython/jupyter notebook for more details.")

    print(
        "\nFrames were selected.\nYou can now label the frames using the function 'label_frames' (if you extracted enough frames for all videos).")

After Change


        else:
            from moviepy.editor import VideoFileClip

        has_failed = []
        for vindex, video in enumerate(videos):
            if userfeedback:
                print("Do you want to extract (perhaps additional) frames for video:", video, "?")
                askuser = input("yes/no")
            else:
                askuser = "yes"

            if askuser == "y" or askuser == "yes" or askuser == "Ja" or askuser == "ha"\
                    or askuser == "oui" or askuser == "ouais":  // multilanguage support :)
                if opencv:
                    cap = cv2.VideoCapture(video)
                    fps = cap.get(
                        5)  # https://docs.opencv.org/2.4/modules/highgui/doc/reading_and_writing_images_and_video.html#videocapture-get
                    nframes = int(cap.get(7))
                else:
                    # Moviepy:
                    clip = VideoFileClip(video)
                    fps = clip.fps
                    nframes = int(np.ceil(clip.duration * 1. / fps))
                indexlength = int(np.ceil(np.log10(nframes)))

                fname = Path(video)
                output_path = Path(config).parents[0] / "labeled-data" / fname.stem

                if output_path.exists():
                    if len(os.listdir(output_path)):
                        askuser = input("The directory already contains some frames. Do you want to add to it?(yes/no): ")
                        if not (askuser == "y" or askuser == "yes" or askuser == "Y" or askuser == "Yes"):
                            sys.exit("Delete the frames and try again later!")

                if crop == "GUI":
                    cfg = select_cropping_area(config, [video])
                coords = cfg["video_sets"][video]["crop"].split(",")
                if crop and not opencv:
                    clip = clip.crop(y1=int(coords[2]), y2=int(coords[3]), x1=int(coords[0]), x2=int(coords[1]))
                elif not crop:
                    coords = None

                print("Extracting frames based on %s ..." % algo)
                if algo == "uniform":
                    if opencv:
                        frames2pick = frameselectiontools.UniformFramescv2(cap, numframes2pick, start, stop)
                    else:
                        frames2pick = frameselectiontools.UniformFrames(clip, numframes2pick, start, stop)
                elif algo == "kmeans":
                    if opencv:
                        frames2pick = frameselectiontools.KmeansbasedFrameselectioncv2(cap, numframes2pick, start, stop,
                                                                                       crop, coords, step=cluster_step,
                                                                                       resizewidth=cluster_resizewidth,
                                                                                       color=cluster_color)
                    else:
                        frames2pick = frameselectiontools.KmeansbasedFrameselection(clip, numframes2pick, start, stop,
                                                                                    step=cluster_step,
                                                                                    resizewidth=cluster_resizewidth,
                                                                                    color=cluster_color)
                else:
                    print(
                        "Please implement this method yourself and send us a pull request! Otherwise, choose 'uniform' or 'kmeans'.")
                    frames2pick = []

                if not len(frames2pick):
                    print("Frame selection failed...")
                    return

                output_path = Path(config).parents[0] / "labeled-data" / Path(video).stem
                is_valid = []
                if opencv:
                    for index in frames2pick:
                        cap.set(1, index)  # extract a particular frame
                        ret, frame = cap.read()
                        if ret:
                            image = img_as_ubyte(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
                            img_name = str(output_path) + "/img" + str(index).zfill(indexlength) + ".png"
                            if crop:
                                io.imsave(img_name, image[int(coords[2]):int(coords[3]), int(coords[0]):int(coords[1]),
                                                    :])  # y1 = int(coords[2]), y2 = int(coords[3]), x1 = int(coords[0]), x2 = int(coords[1])
                            else:
                                io.imsave(img_name, image)
                            is_valid.append(True)
                        else:
                            print("Frame", index, " not found!")
                            is_valid.append(False)
                    cap.release()
                else:
                    for index in frames2pick:
                        try:
                            image = img_as_ubyte(clip.get_frame(index * 1. / clip.fps))
                            img_name = str(output_path) + "/img" + str(index).zfill(indexlength) + ".png"
                            io.imsave(img_name, image)
                            if np.var(image) == 0:  # constant image
                                print(
                                    "Seems like black/constant images are extracted from your video. Perhaps consider using opencv under the hood, by setting: opencv=True")
                            is_valid.append(True)
                        except FileNotFoundError:
                            print("Frame // ", index, " does not exist.")
                            is_valid.append(False)
                    clip.close()
                    del clip

                if not any(is_valid):
                    has_failed.append(True)
                else:
                    has_failed.append(False)

        if all(has_failed):
            print("Frame extraction failed. Video files must be corrupted.")
            return
        elif any(has_failed):
            print("Although most frames were extracted, some were invalid.")
        else:
            print("Frames were successfully extracted.")
        print("\nYou can now label the frames using the function "label_frames" "
              "(if you extracted enough frames for all videos).")
    else:
        print("Invalid MODE. Choose either "manual" or "automatic". Check ``help(deeplabcut.extract_frames)`` on python and ``deeplabcut.extract_frames?`` \
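
For orientation, a minimal call into the changed function might look like the sketch below. The mode and algorithm strings ("automatic", "kmeans") and the keyword names (crop, userfeedback, opencv, algo) are taken from the code in the diff; the config path is hypothetical and the exact keyword defaults are an assumption, not something this commit confirms.

# Usage sketch for the patched extract_frames (assumed public API).
import deeplabcut

config_path = "/home/user/myproject/config.yaml"  # hypothetical project config

# Automatic extraction with kmeans-based frame selection, decoding frames via
# OpenCV and skipping the per-video yes/no prompt (userfeedback=False).
deeplabcut.extract_frames(
    config_path,
    mode="automatic",
    algo="kmeans",
    crop=False,
    userfeedback=False,
    opencv=True,
)

With the change above, a video whose frames cannot be read no longer aborts the whole run silently: per-video success is tracked in has_failed, and the final message distinguishes complete failure, partial failure, and success.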
In pattern: SUPERPATTERN

Frequency: 3

Non-data size: 5

Instances


Project Name: AlexEMG/DeepLabCut
Commit Name: 3ab517035d61e06e9d59bcc9b39b9710897e82c2
Time: 2020-04-04
Author: mathis@rowland.harvard.edu
File Name: deeplabcut/generate_training_dataset/frame_extraction.py
Class Name:
Method Name: extract_frames


Project Name: AlexEMG/DeepLabCut
Commit Name: a51d9ee3185ae5f83b5903d6b8549257ea4f13ff
Time: 2020-04-02
Author: amathis@fas.harvard.edu
File Name: deeplabcut/generate_training_dataset/frame_extraction.py
Class Name:
Method Name: extract_frames


Project Name: rlworkgroup/garage
Commit Name: eff8fbd2e3fb295d3c42dbc28d9d7cbcb6ca64ad
Time: 2018-08-02
Author: eric-heiden@users.noreply.github.com
File Name: tests/envs/test_envs.py
Class Name: TestEnvs
Method Name: test_env