API Quick Reference

Warning

In active development. Currently pre-alpha: the API may change significantly in future releases.

MD.ai Python client library on GitHub

Developer API documentation

QuickStart - set up a project

import mdai

# Get variables from the project info tab and user settings
DOMAIN = 'public.md.ai'
YOUR_PERSONAL_TOKEN = 'a1s2d3f4g4h5h59797kllh8vk'
PROJECT_ID = 'LxR6zdR2'  # from project info tab
DATASET_ID = 'D_ao3XWQ'  # from project info tab
PATH_FOR_DATA = '.'
PATH_TO_IMAGES = './mydata'  # location of images if not downloaded from the project

mdai_client = mdai.Client(domain=DOMAIN, access_token=YOUR_PERSONAL_TOKEN)

# download images and annotation data
p = mdai_client.project(PROJECT_ID, path=PATH_FOR_DATA)
# or, give a path to existing images and download only the annotation data
p = mdai_client.project(PROJECT_ID, path=PATH_TO_IMAGES, annotations_only=True)
# or, build the project from a previously downloaded annotations JSON file
p = mdai.preprocess.Project(annotations_fp=JSONPATH_FROM_FUNCTION_ABOVE, images_dir=PATH_TO_IMAGES)

# show label groups to find the desired label ids for the project
p.show_label_groups()
# create labels_dict mapping the desired label ids to class values
labels_dict = {
    'L_ylR0L8': 0,  # no lung opacity
    'L_DlqEAl': 1,  # lung opacity
}
# initialize the project with labels_dict
p.set_labels_dict(labels_dict)
# prepare the dataset to instantiate annotations and image ids
dataset = p.get_dataset_by_id(DATASET_ID)
dataset.prepare()

Display label classes

dataset.show_classes()

Example output:

    Label id: L_ylR0L8, Class id: 0, Class text: No Lung Opacity
    Label id: L_DlqEAl, Class id: 1, Class text: Lung Opacity

Display images

display_images(image_ids, titles=None, cols=3, cmap="gray", norm=None, interpolation=None)

 mdai.visualize.display_images(image_ids)
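
For example, to display the first few images in a dataset (a sketch, assuming the dataset exposes get_image_ids()):

 image_ids = dataset.get_image_ids()
 mdai.visualize.display_images(image_ids[:6], cols=3)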

Get DICOM pixel array

load_dicom_image(image_id, to_RGB=False, rescale=False)

to_RGB=True returns a 3-channel RGB array; rescale=True returns a uint8 array scaled to the 0-255 range.

 pixel_array = mdai.visualize.load_dicom_image(image_id, to_RGB=False, rescale=True)
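
The result is a plain NumPy array, so it can be inspected and plotted directly; for example:

 import matplotlib.pyplot as plt

 pixel_array = mdai.visualize.load_dicom_image(image_id, to_RGB=False, rescale=True)
 print(pixel_array.shape, pixel_array.dtype)  # e.g., (1024, 1024) uint8 when rescale=True
 plt.imshow(pixel_array, cmap='gray')
 plt.axis('off')
 plt.show()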

Mask

 mask = mdai.visualize.load_mask(image_id, dataset)
 image_plus_mask = mdai.visualize.apply_mask(image, mask, color, alpha=0.3)
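
A sketch combining the two, assuming an RGB base image and an (r, g, b) color tuple with components in [0, 1] (both assumptions, not confirmed by the signatures above):

 import matplotlib.pyplot as plt

 # sketch only: assumes an RGB image and a color tuple with components in [0, 1]
 image = mdai.visualize.load_dicom_image(image_id, to_RGB=True)
 mask = mdai.visualize.load_mask(image_id, dataset)
 image_plus_mask = mdai.visualize.apply_mask(image, mask, color=(1.0, 0.0, 0.0), alpha=0.3)
 plt.imshow(image_plus_mask)
 plt.axis('off')
 plt.show()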

Get image with all annotations and masks

 image, class_ids, bboxes, masks = mdai.visualize.get_image_ground_truth(image_id, dataset)

Display image and masks

# shows one image and all of its segmentations
mdai.visualize.display_annotations(
    image,
    boxes,
    masks,
    class_ids,
    scores=None,
    title="",
    figsize=(16, 16),
    ax=None,
    show_mask=True,
    show_bbox=True,
    colors=None,
    captions=None,
)
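
The outputs of get_image_ground_truth plug directly into display_annotations:

image, class_ids, bboxes, masks = mdai.visualize.get_image_ground_truth(image_id, dataset)
mdai.visualize.display_annotations(image, bboxes, masks, class_ids, show_mask=True, show_bbox=True)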

Custom annotations dataset

  1. Create a project and dataset using the quickstart section
  2. Get annotation data
  3. Edit annotations and feed back into dataset data
  4. Initialize custom dataset with edited data
annotations = dataset.all_annotations
# edit the annotations as needed (identity placeholder shown; see the filtering sketch below)
edited_annotations = annotations

# get the underlying data from the dataset and swap in the edited annotations
dataset_data = dataset.dataset_data
dataset_data['annotations'] = edited_annotations

# images_dir is the directory containing the images (e.g., PATH_TO_IMAGES from the quickstart)
custom_dataset = mdai.preprocess.Dataset(dataset_data, images_dir)

# now use this new dataset to create training/validation datasets with train_test_split
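
As a concrete example of step 3, the hypothetical edit below keeps only annotations for one label id; it assumes each annotation is a dict with a 'labelId' key, as in the MD.ai JSON export:

# hypothetical edit: keep only annotations for a single label id
# assumes each annotation is a dict with a 'labelId' key (MD.ai JSON export format)
edited_annotations = [a for a in annotations if a['labelId'] == 'L_DlqEAl']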

Split data into training and validation datasets

train_test_split(dataset, shuffle=True, validation_split=0.1)

train_dataset, valid_dataset = mdai.common_utils.train_test_split(dataset)
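
To hold out a different fraction, pass validation_split explicitly; for example, an 80/20 split:

# hold out 20% for validation instead of the default 10%
train_dataset, valid_dataset = mdai.common_utils.train_test_split(
    dataset, shuffle=True, validation_split=0.2
)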

DataGenerator

DataGenerator(dataset, batch_size=32, dim=(32, 32), n_channels=1, n_classes=10, shuffle=True, to_RGB=True, rescale=False)

mdai.utils.keras_utils.DataGenerator(dataset)
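
A minimal, illustrative training sketch wiring the split datasets into Keras generators; the model and parameter values here are assumptions, not part of the MD.ai API, and the sketch assumes the generator yields one-hot class labels:

from tensorflow import keras

# illustrative values: match dim, n_channels, and n_classes to your data
params = {
    'batch_size': 16,
    'dim': (128, 128),
    'n_channels': 1,
    'n_classes': 2,
    'shuffle': True,
    'to_RGB': False,  # keep single-channel input to match n_channels=1
}
train_generator = mdai.utils.keras_utils.DataGenerator(train_dataset, **params)
valid_generator = mdai.utils.keras_utils.DataGenerator(valid_dataset, **params)

# toy classification model; any Keras model with a matching input shape works
model = keras.Sequential([
    keras.layers.Input(shape=(128, 128, 1)),
    keras.layers.Conv2D(16, 3, activation='relu'),
    keras.layers.GlobalAveragePooling2D(),
    keras.layers.Dense(2, activation='softmax'),
])
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.fit(train_generator, validation_data=valid_generator, epochs=5)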

Write to TFRecords

mdai.utils.tensorflow_utils.write_to_tfrecords(output_path, dataset)
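
For example, write the training split to a file and count the serialized records back with tf.data (parsing the records depends on the writer's schema, so only a raw count is shown):

import tensorflow as tf

# example path; write the training split, then count records without parsing them
output_path = './train.tfrecords'
mdai.utils.tensorflow_utils.write_to_tfrecords(output_path, train_dataset)

num_records = sum(1 for _ in tf.data.TFRecordDataset(output_path))
print(num_records)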