Transfer Learning with MoveNet Thunder

import csv
import os
import sys
import tempfile

import cv2
import numpy as np
import pandas as pd
import tensorflow as tf
import tqdm

# detect(), draw_prediction_on_image() and BodyPart come from the MoveNet helper
# code defined earlier in the linked TensorFlow pose-classification tutorial.
class MoveNetPreprocessor(object):
  """Helper class to preprocess pose sample images for classification."""
 
  def __init__(self,
               images_in_folder,
               images_out_folder,
               csvs_out_path): 
    self._images_in_folder = images_in_folder
    self._images_out_folder = images_out_folder
    self._csvs_out_path = csvs_out_path
    self._messages = []

    # Create a temp dir to store the pose CSVs per class
    self._csvs_out_folder_per_class = tempfile.mkdtemp()

    # Get list of pose classes and print image statistics
    self._pose_class_names = sorted(
        [n for n in os.listdir(self._images_in_folder) if not n.startswith('.')])
  
  def preprocess_image(self, image_path):
    # Load and preprocess the image using your desired method
    image = tf.io.read_file(image_path)
    image = tf.io.decode_jpeg(image)
    # Apply any additional preprocessing steps here
    return image
  
  def process(self, per_pose_class_limit=None, detection_threshold=0.1):
    # Loop through the classes and preprocess their images
    for pose_class_name in self._pose_class_names:
      print('Preprocessing', pose_class_name, file=sys.stderr)
      
      # Paths for the pose class
      images_in_folder = os.path.join(self._images_in_folder, pose_class_name)
      images_out_folder = os.path.join(self._images_out_folder, pose_class_name)
      csv_out_path = os.path.join(self._csvs_out_folder_per_class, pose_class_name + '.csv')
      
      if not os.path.exists(images_out_folder):
        os.makedirs(images_out_folder)
      
      # Detect landmarks in each image and write them to a CSV file
      with open(csv_out_path, 'w') as csv_out_file:
        csv_out_writer = csv.writer(csv_out_file, delimiter=',', quoting=csv.QUOTE_MINIMAL)
        
        # Get list of images
        image_names = sorted([n for n in os.listdir(images_in_folder) if not n.startswith('.')])
        if per_pose_class_limit is not None:
          image_names = image_names[:per_pose_class_limit]

        valid_image_count = 0
        
        # Detect pose landmarks from each image
        for image_name in tqdm.tqdm(image_names):
          image_path = os.path.join(images_in_folder, image_name)
          
          try:
            image = self.preprocess_image(image_path)
            image_height, image_width, channel = image.shape
          
          except Exception:
            self._messages.append('Skipped ' + image_path + '. Invalid image.')
            continue
          
          # Skip images that aren't RGB because MoveNet requires RGB images
          if channel != 3:
            self._messages.append('Skipped ' + image_path + '. Image isn\'t in RGB format.')
            continue

          person = detect(image)

          # Save landmarks if all landmarks were detected
          min_landmark_score = min([keypoint.score for keypoint in person.keypoints])
          should_keep_image = min_landmark_score >= detection_threshold
          if not should_keep_image:
            self._messages.append('Skipped ' + image_path + '. No pose was confidently detected.')
            continue

          valid_image_count += 1

          # Draw the prediction result on top of the image for debugging later
          output_overlay = draw_prediction_on_image(image.numpy().astype(np.uint8), person,
                                                    close_figure=True, keep_input_size=True)
          
          # Write detection result into an image file
          output_frame = cv2.cvtColor(output_overlay, cv2.COLOR_RGB2BGR)
          cv2.imwrite(os.path.join(images_out_folder, image_name), output_frame)

          # Get landmarks and scale them to the same size as the input image
          pose_landmarks = np.array(
              [[keypoint.coordinate.x, keypoint.coordinate.y, keypoint.score]
               for keypoint in person.keypoints],
               dtype=np.float32)

          # Write the landmark coordinates to its per-class CSV file
          coordinates = pose_landmarks.flatten().astype(str).tolist()  # np.str is removed in newer NumPy releases
          csv_out_writer.writerow([image_name] + coordinates)
          
        if not valid_image_count:
          raise RuntimeError('No valid images found for the "{}" class.'.format(pose_class_name))

    # Print the error messages collected during preprocessing.
    print('\n'.join(self._messages))

    # Combine all per-class CSVs into a single output file
    all_landmarks_df = self._all_landmarks_as_dataframe()
    all_landmarks_df.to_csv(self._csvs_out_path, index=False)

  def class_names(self):
    """List of classes found in the training dataset."""
    return self._pose_class_names

  def _all_landmarks_as_dataframe(self):
    """Merge all per-class CSVs into a single dataframe."""
    total_df = None
    for class_index, class_name in enumerate(self._pose_class_names):
      csv_out_path = os.path.join(self._csvs_out_folder_per_class, class_name + '.csv')
      per_class_df = pd.read_csv(csv_out_path, header=None)

      # Add the labels
      per_class_df['class_no'] = [class_index] * len(per_class_df)
      per_class_df['class_name'] = [class_name] * len(per_class_df)

      # Prepend the class folder name to the file name (first column)
      per_class_df[per_class_df.columns[0]] = (os.path.join(class_name, '') + per_class_df[per_class_df.columns[0]].astype(str))

      if total_df is None:
        # For the first class, assign its data to the total dataframe
        total_df = per_class_df
      
      else:
        # Concatenate each class's data into the total dataframe
        total_df = pd.concat([total_df, per_class_df], axis=0)

    list_name = [[bodypart.name + '_x', bodypart.name + '_y', bodypart.name + '_score'] for bodypart in BodyPart]
    header_name = []
    for columns_name in list_name:
      header_name += columns_name
    header_name = ['file_name'] + header_name
    header_map = {total_df.columns[i]: header_name[i]
                  for i in range(len(header_name))}

    total_df.rename(header_map, axis=1, inplace=True)
    
    return total_df

This is the class code.
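For context, the constructor above enumerates the immediate sub-folders of images_in_folder and treats each one as a pose class, so images_in_folder is meant to be the folder that contains the class sub-folders, not a class folder itself. A minimal usage sketch (all paths and names below are illustrative placeholders, not my real dataset):

# Hypothetical paths, shown only to illustrate the layout the class expects:
# one sub-folder per pose class under images_in_folder.
images_in_folder = '/content/poses/train'              # contains 'Class 1/', 'Class 2/', ...
images_out_folder = '/content/poses/train_annotated'   # annotated copies are written here
csvs_out_path = '/content/poses/train_data.csv'        # the combined landmark CSV

preprocessor = MoveNetPreprocessor(
    images_in_folder=images_in_folder,
    images_out_folder=images_out_folder,
    csvs_out_path=csvs_out_path,
)
preprocessor.process(per_pose_class_limit=None)
print(preprocessor.class_names())   # e.g. ['Class 1', 'Class 2']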

dataset_in = '/content/drive/MyDrive/GROUP 3 THESIS/Stretches /Stretches/(SAMPLE 1)'

if not os.path.isdir(dataset_in):
    raise Exception("dataset_in is not a valid directory")
else:
    print("Dataset folder:", dataset_in)
    print("Contents:", os.listdir(dataset_in))

dataset_out = '/content/drive/MyDrive/GROUP 3 THESIS/Stretches /Stretches/(SAMPLE2)'

if not os.path.isdir(dataset_out):
    raise Exception("dataset_in is not a valid directory")
else:
    print("Dataset folder:", dataset_out)
    print("Contents:", os.listdir(dataset_out))

if is_skip_step_1 and use_custom_dataset:
    IMAGES_ROOT = os.path.join(dataset_in, 'Training')
    IMAGES_ROOT1 = os.path.join(dataset_out, 'Training')
    CLASS1_FOLDER = 'Class 1'
    CLASS2_FOLDER = 'Class 2'
    images_in_folder_class1 = os.path.join(IMAGES_ROOT, CLASS1_FOLDER)
    images_out_folder_class1 = os.path.join(IMAGES_ROOT1, CLASS1_FOLDER)
    csvs_out_path_class1 = os.path.join(images_out_folder_class1, 'dataset_class1.csv')
    images_in_folder_class2 = os.path.join(IMAGES_ROOT, CLASS2_FOLDER)
    images_out_folder_class2 = os.path.join(IMAGES_ROOT1, CLASS2_FOLDER)
    csvs_out_path_class2 = os.path.join(images_out_folder_class2, 'dataset_class2.csv')

    # Check the contents of the image folders
    print("Images in folder 1:", os.listdir(images_in_folder_class1))
    print("Images in folder 2:", os.listdir(images_in_folder_class2))

    preprocessor_class1 = MoveNetPreprocessor(
        images_in_folder=images_in_folder_class1,
        images_out_folder=images_out_folder_class1,
        csvs_out_path=csvs_out_path_class1,
    )

    preprocessor_class1.process(per_pose_class_limit=None)

    preprocessor_class2 = MoveNetPreprocessor(
        images_in_folder=images_in_folder_class2,
        images_out_folder=images_out_folder_class2,
        csvs_out_path=csvs_out_path_class2,
    )

    preprocessor_class2.process(per_pose_class_limit=None)

It shows this error:

Dataset folder: /content/drive/MyDrive/GROUP 3 THESIS/Stretches /Stretches/(SAMPLE 1)
Contents: ['Training', 'Validation']
Dataset folder: /content/drive/MyDrive/GROUP 3 THESIS/Stretches /Stretches/(SAMPLE2)
Contents: ['Training', 'Validation']
Images in folder 1: ['IMG_9659.JPG', 'IMG_9646.JPG', 'IMG_9649.JPG', 'IMG_9684.JPG', 'IMG_9688.JPG', 'IMG_9645.JPG', 'IMG_9683.JPG', 'IMG_9685.JPG']
Images in folder 2: ['IMG_20230414_115238.jpg', 'IMG_20230414_115240.jpg', 'IMG_20230414_115242.jpg', 'IMG_20230414_115250.jpg', 'IMG_20230414_115249.jpg', 'IMG_20230414_115332.jpg', 'IMG_20230414_115334.jpg', 'IMG_20230414_115337.jpg']
Preprocessing IMG_9645.JPG
---------------------------------------------------------------------------
NotADirectoryError                        Traceback (most recent call last)
<ipython-input-55-246279deb933> in <cell line: 17>()
     37     )
     38 
---> 39     preprocessor_class1.process(per_pose_class_limit=None)
     40 
     41     preprocessor_class2 = MoveNetPreprocessor(

<ipython-input-54-46ec39eeb5d2> in process(self, per_pose_class_limit, detection_threshold)
     43 
     44         # Get list of images
---> 45         image_names = sorted([n for n in os.listdir(images_in_folder) if not n.startswith('.')])
     46         if per_pose_class_limit is not None:
     47           image_names = image_names[:per_pose_class_limit]

NotADirectoryError: [Errno 20] Not a directory: '/content/drive/MyDrive/GROUP 3 THESIS/Stretches /Stretches/(SAMPLE 1)/Training/Class 1/IMG_9645.JPG'

I tried the code here: https://www.tensorflow.org/lite/tutorials/pose_classification?fbclid=IwAR1IMAK4633UkSaNtpCp6GmIxc9QJYITJ5wRdDJ0uMZw-eRMCJXWVpNGGbY#preprocess_the_train_dataset

However, there are no prompts there for using my own dataset, so I improvised by mounting my Google Drive (the drive import). I just want to know how to get past the "Not a directory" error. My Google Drive directory looks roughly like this:

Training
├── Class 1
│   ├── image1.jpg
│   └── image2.jpg
└── Class 2
    ├── image1.jpg
    └── image2.jpg
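
To make the failure mode concrete, here is a small self-contained sketch (it only creates a throwaway temp folder with hypothetical file names) of how the preprocessor's directory listing behaves when it is pointed at the Training folder versus at a single class folder; the second case raises the same NotADirectoryError as the traceback above:

import os
import pathlib
import tempfile

# Build a throwaway tree that mirrors the layout above (names are hypothetical).
root = pathlib.Path(tempfile.mkdtemp())
for class_folder in ('Class 1', 'Class 2'):
    (root / 'Training' / class_folder).mkdir(parents=True)
    (root / 'Training' / class_folder / 'image1.jpg').touch()

# Case 1: images_in_folder points at 'Training'. Every entry is a class folder,
# so listing each entry again works.
training = root / 'Training'
for name in sorted(os.listdir(training)):
    print(name, os.listdir(training / name))

# Case 2: images_in_folder points at a single class folder. Every entry is an
# image file, and os.listdir on a file raises the same error as the traceback.
class1 = training / 'Class 1'
for name in sorted(os.listdir(class1)):
    try:
        os.listdir(class1 / name)
    except NotADirectoryError as err:
        print(err)   # [Errno 20] Not a directory: .../Class 1/image1.jpg

This also matches the log above: the constructor picked up 'IMG_9645.JPG' as a "pose class" name, which is why "Preprocessing IMG_9645.JPG" is printed right before the error.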