import tensorflow as tf                 # pip install tensorflow
import matplotlib.pyplot as plt
import numpy as np                      # pip install numpy
import tensorflow_datasets as tfds      # pip install tensorflow_datasets (e.g. from the Anaconda command prompt)
from tensorflow import keras
tfds.disable_progress_bar()             # hide the download progress bars
Find datasets
Overview: https://www.tensorflow.org/datasets/overview
Catalog: https://www.tensorflow.org/datasets/catalog/overview
tfds.list_builders()
['abstract_reasoning', 'accentdb', 'aeslc', 'aflw2k3d', 'ag_news_subset', 'ai2_arc', 'ai2_arc_with_ir', 'amazon_us_reviews', 'anli', 'arc', 'bair_robot_pushing_small', 'bccd', 'beans', 'big_patent', 'bigearthnet', 'billsum', 'binarized_mnist', 'binary_alpha_digits', 'blimp', 'bool_q', 'c4', 'caltech101', 'caltech_birds2010', 'caltech_birds2011', 'cars196', 'cassava', 'cats_vs_dogs', 'celeb_a', 'celeb_a_hq', 'cfq', 'cherry_blossoms', 'chexpert', 'cifar10', 'cifar100', 'cifar10_1', 'cifar10_corrupted', 'citrus_leaves', 'cityscapes', 'civil_comments', 'clevr', 'clic', 'clinc_oos', 'cmaterdb', 'cnn_dailymail', 'coco', 'coco_captions', 'coil100', 'colorectal_histology', 'colorectal_histology_large', 'common_voice', 'coqa', 'cos_e', 'cosmos_qa', 'covid19sum', 'crema_d', 'curated_breast_imaging_ddsm', 'cycle_gan', 'd4rl_mujoco_ant', 'd4rl_mujoco_halfcheetah', 'dart', 'davis', 'deep_weeds', 'definite_pronoun_resolution', 'dementiabank', 'diabetic_retinopathy_detection', 'div2k', 'dmlab', 'dolphin_number_word', 'downsampled_imagenet', 'drop', 'dsprites', 'dtd', 'duke_ultrasound', 'e2e_cleaned', 'efron_morris75', 'emnist', 'eraser_multi_rc', 'esnli', 'eurosat', 'fashion_mnist', 'flic', 'flores', 'food101', 'forest_fires', 'fuss', 'gap', 'geirhos_conflict_stimuli', 'gem', 'genomics_ood', 'german_credit_numeric', 'gigaword', 'glue', 'goemotions', 'gpt3', 'gref', 'groove', 'gtzan', 'gtzan_music_speech', 'hellaswag', 'higgs', 'horses_or_humans', 'howell', 'i_naturalist2017', 'imagenet2012', 'imagenet2012_corrupted', 'imagenet2012_real', 'imagenet2012_subset', 'imagenet_a', 'imagenet_r', 'imagenet_resized', 'imagenet_v2', 'imagenette', 'imagewang', 'imdb_reviews', 'irc_disentanglement', 'iris', 'kitti', 'kmnist', 'lambada', 'lfw', 'librispeech', 'librispeech_lm', 'libritts', 'ljspeech', 'lm1b', 'lost_and_found', 'lsun', 'lvis', 'malaria', 'math_dataset', 'mctaco', 'mlqa', 'mnist', 'mnist_corrupted', 'movie_lens', 'movie_rationales', 'movielens', 'moving_mnist', 'multi_news', 'multi_nli', 'multi_nli_mismatch', 'natural_questions', 'natural_questions_open', 'newsroom', 'nsynth', 'nyu_depth_v2', 'ogbg_molpcba', 'omniglot', 'open_images_challenge2019_detection', 'open_images_v4', 'openbookqa', 'opinion_abstracts', 'opinosis', 'opus', 'oxford_flowers102', 'oxford_iiit_pet', 'para_crawl', 'patch_camelyon', 'paws_wiki', 'paws_x_wiki', 'pet_finder', 'pg19', 'piqa', 'places365_small', 'plant_leaves', 'plant_village', 'plantae_k', 'qa4mre', 'qasc', 'quac', 'quickdraw_bitmap', 'race', 'radon', 'reddit', 'reddit_disentanglement', 'reddit_tifu', 'resisc45', 'robonet', 'rock_paper_scissors', 'rock_you', 's3o4d', 'salient_span_wikipedia', 'samsum', 'savee', 'scan', 'scene_parse150', 'schema_guided_dialogue', 'scicite', 'scientific_papers', 'sentiment140', 'shapes3d', 'siscore', 'smallnorb', 'snli', 'so2sat', 'speech_commands', 'spoken_digit', 'squad', 'stanford_dogs', 'stanford_online_products', 'star_cfq', 'starcraft_video', 'stl10', 'story_cloze', 'sun397', 'super_glue', 'svhn_cropped', 'tao', 'ted_hrlr_translate', 'ted_multi_translate', 'tedlium', 'tf_flowers', 'the300w_lp', 'tiny_shakespeare', 'titanic', 'trec', 'trivia_qa', 'tydi_qa', 'uc_merced', 'ucf101', 'vctk', 'vgg_face2', 'visual_domain_decathlon', 'voc', 'voxceleb', 'voxforge', 'waymo_open_dataset', 'web_nlg', 'web_questions', 'wider_face', 'wiki40b', 'wiki_bio', 'wiki_table_questions', 'wiki_table_text', 'wikiann', 'wikihow', 'wikipedia', 'wikipedia_toxicity_subtypes', 'wine_quality', 'winogrande', 'wmt13_translate', 'wmt14_translate', 'wmt15_translate', 
'wmt16_translate', 'wmt17_translate', 'wmt18_translate', 'wmt19_translate', 'wmt_t2t_translate', 'wmt_translate', 'wordnet', 'wsc273', 'xnli', 'xquad', 'xsum', 'xtreme_pawsx', 'xtreme_xnli', 'yelp_polarity_reviews', 'yes_no', 'youtube_vis']
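Since the catalog is long, it can help to filter the builder names for a keyword before picking one. A quick sketch (the substring 'rock' here is just an example):

[name for name in tfds.list_builders() if 'rock' in name]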
Get information on the Data
builder = tfds.builder('rock_paper_scissors')
info = builder.info
info
tfds.core.DatasetInfo(
    name='rock_paper_scissors',
    full_name='rock_paper_scissors/3.0.0',
    description="""
    Images of hands playing rock, paper, scissor game.
    """,
    homepage='http://laurencemoroney.com/rock-paper-scissors-dataset',
    data_path='C:\\Users\\Valerie MASODA FOTSO\\tensorflow_datasets\\rock_paper_scissors\\3.0.0',
    download_size=Unknown size,
    dataset_size=Unknown size,
    features=FeaturesDict({
        'image': Image(shape=(300, 300, 3), dtype=tf.uint8),
        'label': ClassLabel(shape=(), dtype=tf.int64, num_classes=3),
    }),
    supervised_keys=('image', 'label'),
    disable_shuffling=False,
    splits={},
    citation="""@ONLINE {rps,
    author = "Laurence Moroney",
    title = "Rock, Paper, Scissors Dataset",
    month = "feb",
    year = "2019",
    url = "http://laurencemoroney.com/rock-paper-scissors-dataset"
    }""",
)
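The fields of the DatasetInfo object can also be read individually; a short sketch of the standard tfds attributes used later in this post (outputs omitted):

info.features['image'].shape        # image dimensions, (300, 300, 3)
info.features['label'].num_classes  # 3 classes
info.features['label'].names        # human-readable class names
info.splits                         # split sizes (empty until the dataset has been downloaded)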
Prepare Rock, Paper, Scissors Data
ds_train = tfds.load(name="rock_paper_scissors", split="train")
Downloading and preparing dataset Unknown size (download: Unknown size, generated: Unknown size, total: Unknown size) to C:\Users\Valerie MASODA FOTSO\tensorflow_datasets\rock_paper_scissors\3.0.0...
Dataset rock_paper_scissors downloaded and prepared to C:\Users\Valerie MASODA FOTSO\tensorflow_datasets\rock_paper_scissors\3.0.0. Subsequent calls will reuse this data.
ds_test = tfds.load(name="rock_paper_scissors", split="test")
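As an alternative to the two separate calls above, tfds.load can return both splits and the DatasetInfo at once, and yield (image, label) tuples directly. A sketch of that variant; the rest of this post keeps the dictionary format, so this is optional:

(ds_train, ds_test), info = tfds.load(
    name="rock_paper_scissors",
    split=["train", "test"],
    as_supervised=True,  # yield (image, label) tuples instead of feature dictionaries
    with_info=True,
)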
Show examples
fig = tfds.show_examples(info, ds_train)
WARNING: For consistency with `tfds.load`, the `tfds.show_examples` signature has been modified from (info, ds) to (ds, info). The old signature is deprecated and will be removed. Please change your call to `tfds.show_examples(ds, info)`

Calling it with the new (ds, info) argument order avoids the warning:
fig = tfds.show_examples(ds_train, info)

Additional Data Preparation
train_images = np.array([example['image'] for example in ds_train])
type(train_images[0])
numpy.ndarray
train_images.shape
(2520, 300, 300, 3)
The images can be extracted as grayscale (keeping only the first channel) or in full colour; the second assignment in each pair overwrites the first, so the colour arrays are what the rest of this post uses.
train_images = np.array([example['image'].numpy()[:, :, 0] for example in ds_train])  # grayscale: first channel only
train_images = np.array([example['image'].numpy() for example in ds_train])           # full-colour images
test_images = np.array([example['image'].numpy()[:, :, 0] for example in ds_test])    # grayscale: first channel only
test_images = np.array([example['image'].numpy() for example in ds_test])             # full-colour images
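The labels can be extracted the same way; a minimal sketch (train_labels and test_labels are names introduced here for illustration, not part of the original preparation):

train_labels = np.array([example['label'].numpy() for example in ds_train])
test_labels = np.array([example['label'].numpy() for example in ds_test])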
Use -1 for the last dimension when reshaping, as shown below: NumPy then infers the channel count (1 for grayscale, 3 for colour), whereas hard-coding a value that does not match the data raises a ValueError.
train_images = train_images.reshape(2520, 300, 300, -1)  # -1 lets NumPy infer the channel dimension
test_images = test_images.reshape(372, 300, 300, -1)

train_images = train_images.astype('float32')
test_images = test_images.astype('float32')

train_images /= 255  # scale pixel values to [0, 1]
test_images /= 255
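If the full arrays ever become too large to keep in memory, the same scaling can instead be done lazily inside the tf.data pipeline; a minimal sketch (the batch size of 32 and the *_scaled names are arbitrary choices):

def scale(example):
    image = tf.cast(example['image'], tf.float32) / 255.0  # same 0-1 scaling as above
    return image, example['label']

ds_train_scaled = ds_train.map(scale).batch(32)
ds_test_scaled = ds_test.map(scale).batch(32)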
train_images.dtype
dtype('float32')
train_images[0].shape
(300, 300, 3)
Plot the first training images
plt.imshow(train_images[0])
plt.show()

plt.imshow(train_images[1])
plt.show()
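To label the plots, the class name can be looked up through the DatasetInfo features; a small sketch that assumes the train_labels array from the earlier sketch:

label_names = info.features['label'].names
plt.imshow(train_images[0])
plt.title(label_names[train_labels[0]])
plt.show()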
