Search the Community
Showing results for tags 'tensorflow'.
-
# ASL alphabet recognition -- data loading, preprocessing and MediaPipe
# hand-landmark extraction.
#
# Training data: 78,000 images, 200x200 pixels, 26 classes (letters A-Z).
# Test data: a mere 26 images (one per letter) named like 'A_test.jpg',
# to encourage the use of real-world test images.
import os

import cv2
import matplotlib.pyplot as plt
import mediapipe as mp
import numpy as np
import tensorflow as tf
from tensorflow.keras import layers

IMG_HEIGHT, IMG_WIDTH = 200, 200
BATCH_SIZE = 32
TRAIN_DIR = r'D:\ASL Recognition using CNN\Input_Images\asl_alphabets\asl_alphabet_train'
TEST_DIR = r'D:\ASL Recognition using CNN\Input_Images\asl_alphabets\asl_alphabet_test'

# 80/20 train/validation split from the same directory.  Both calls must use
# the same seed AND the same shuffle setting: Keras shuffles the file list
# with the seed *before* splitting, so mixing shuffle=True (training) with
# shuffle=False (validation) can make the two subsets overlap.
train_dataset = tf.keras.utils.image_dataset_from_directory(
    directory=TRAIN_DIR,
    labels='inferred',
    label_mode='int',
    image_size=(IMG_HEIGHT, IMG_WIDTH),
    color_mode='rgb',
    batch_size=BATCH_SIZE,
    shuffle=True,
    seed=42,
    validation_split=0.2,
    subset='training',
)

validation_dataset = tf.keras.utils.image_dataset_from_directory(
    directory=TRAIN_DIR,
    labels='inferred',
    label_mode='int',
    image_size=(IMG_HEIGHT, IMG_WIDTH),
    color_mode='rgb',
    batch_size=BATCH_SIZE,
    shuffle=True,  # must match the training call so the split stays disjoint
    seed=42,
    validation_split=0.2,
    subset='validation',
)


def test_dataset_generator():
    """Build the test dataset from a flat directory of '<letter>_test.jpg' files.

    The test images are not organised into per-class subdirectories, so
    image_dataset_from_directory cannot infer their labels; instead the
    class letter is parsed out of each file name.

    Returns:
        A tf.data.Dataset of (image, label) batches where image is a
        float32 (200, 200, 3) tensor in [0, 255] and label is the class
        letter as a string tensor (e.g. b'A').
    """
    batch_size = 26  # all 26 test images fit in a single batch

    test_filepaths = tf.data.Dataset.list_files(os.path.join(TEST_DIR, '*.jpg'))

    def extract_label(file_path):
        # Windows path like r'D:\...\asl_alphabet_test\A_test.jpg'
        # -> 'A_test.jpg' -> 'A'
        file_name = tf.strings.split(file_path, '\\')[-1]
        return tf.strings.split(file_name, '_')[0]

    def process_path(file_path):
        label = extract_label(file_path)
        img = tf.io.read_file(file_path)
        img = tf.image.decode_jpeg(img, channels=3)  # decoded as RGB
        img = tf.image.resize(img, [IMG_HEIGHT, IMG_WIDTH])
        return img, label

    return test_filepaths.map(process_path).batch(batch_size)


test_dataset = test_dataset_generator()

# Rescaling is needed for every split, but random augmentation only for
# training.  (Previously Rescaling lived inside the augmentation pipeline,
# so the validation set was being augmented and the test set was never
# rescaled at all.)
data_augmentation = tf.keras.Sequential([
    layers.RandomFlip("horizontal"),
    layers.RandomRotation(0.2),
])
data_rescaling = layers.Rescaling(1. / 255)

# Configure the datasets for performance: parallel map calls and buffered
# prefetching so batches are yielded from disk without I/O blocking.
AUTOTUNE = tf.data.AUTOTUNE


def preprocess(ds, augment=False):
    """Rescale (and optionally augment) a batched image dataset.

    Args:
        ds: batched tf.data.Dataset of (image, label) pairs with pixel
            values in [0, 255].
        augment: apply random flip/rotation -- we only augment training data!

    Returns:
        The transformed dataset, with prefetching enabled.
    """
    if augment:
        ds = ds.map(
            lambda image, label: (data_augmentation(image, training=True), label),
            num_parallel_calls=AUTOTUNE)
    ds = ds.map(
        lambda image, label: (data_rescaling(image), label),
        num_parallel_calls=AUTOTUNE)
    return ds.prefetch(buffer_size=AUTOTUNE)


train_preprocessed_dataset = preprocess(train_dataset, augment=True)
validation_preprocessed_dataset = preprocess(validation_dataset)  # no augmentation
test_preprocessed_dataset = preprocess(test_dataset)

# Creating MediaPipe Hands landmark extraction.
mp_hands = mp.solutions.hands
hands = mp_hands.Hands(
    max_num_hands=2,
    min_detection_confidence=0.3,
    min_tracking_confidence=0.3,
    model_complexity=1,
    static_image_mode=True,
)

NUM_LANDMARKS = 21  # landmarks per hand in the MediaPipe hand model


def feature_extraction(image):
    """Extract a (21, 3) array of hand landmarks from one image tensor.

    Only the first detected hand is used so that every dataset element
    has the same fixed shape; a zero array is returned when no hand is
    detected.

    Args:
        image: float32 (H, W, 3) RGB tensor with pixel values in [0, 255]
            (the raw output of image_dataset_from_directory).

    Returns:
        float32 tensor of shape (21, 3) holding (x, y, z) per landmark.
    """
    # image_dataset_from_directory yields float32 pixels already in
    # [0, 255]; MediaPipe wants uint8 RGB, and TF decodes JPEGs as RGB,
    # so no extra 255 scaling and no BGR<->RGB swap is needed.  (The old
    # code multiplied by 255 again -- overflowing uint8 -- and applied a
    # channel swap that scrambled the colours.)
    img = np.clip(image.numpy(), 0, 255).astype(np.uint8)

    results = hands.process(img)
    if results.multi_hand_landmarks is None:
        return tf.zeros((NUM_LANDMARKS, 3), dtype=tf.float32)  # no hands found

    # NOTE: the original passed a *generator* to np.array, which builds a
    # 0-d object array instead of an (N, 3) float array -- list brackets
    # are required.
    first_hand = results.multi_hand_landmarks[0]
    landmarks = np.array(
        [[lm.x, lm.y, lm.z] for lm in first_hand.landmark],
        dtype=np.float32)
    return tf.convert_to_tensor(landmarks)


def _extract_landmarks(image, label):
    """tf.data wrapper: run feature_extraction via py_function, keep the label."""
    landmarks = tf.py_function(feature_extraction, [image], tf.float32)
    landmarks.set_shape((NUM_LANDMARKS, 3))  # py_function erases shape info
    return landmarks, label


# MediaPipe processes one image at a time, so unbatch before mapping and
# rebatch afterwards (the original mapped over whole 32-image batches,
# which hands.process() cannot consume).
train_landmark_dataset = (
    train_dataset.unbatch().map(_extract_landmarks).batch(BATCH_SIZE))
validation_landmark_dataset = (
    validation_dataset.unbatch().map(_extract_landmarks).batch(BATCH_SIZE))

train_combined_dataset = tf.data.Dataset.zip(
    (train_preprocessed_dataset, train_landmark_dataset))
# validation_combined_dataset = tf.data.Dataset.zip(
#     (validation_preprocessed_dataset, validation_landmark_dataset))

for landmarks, labels in train_landmark_dataset.take(1):
    print("\n\n\nThe landmarks of a batch are:", landmarks)
-
- python
- tensorflow
-
(and 1 more)
Tagged with:
-
Tensorflow model deeplab: https://github.com/tensorflow/tfjs-models/tree/master/deeplab doesn't seem to work for me on node.js. I haven't been able to find any example code for deeplab in nodejs. When I try to segment an image I get the error: The dtype of dict['ImageTensor'] provided in model.execute(dict) must be int32, but was float32. Googling the error seemed to turn up no helpful solutions. Would love thoughts from somebody who knows more!
- 11 replies
-
Hey guys, I am currently working on a Neural Network based project, and right now am using an old laptop for that. my laptop's gpu is not supported by tensorflow so i am thinking to build a pc. Its my first time with PC hardware so i don't have much idea on actual hardware but am a regular linustechtips viewer so have some theoretical knowledge of PC building For neural networks, GPU is the king. 95% work will be done by GPU, so I need a system with a decent GPU maybe 1050 or something more than that. Basically anything more than Compute Capability 3.0 will work for me but more the performance, better it is. I have attached the list of supported GPU's. RAM speed doesn't matter but amount of RAM matters maybe around 8GB . CPU doesnt matter at all. But I would like to have decent CPU to handle all daily tasks (and also some gaming ) The reason why I am asking about gaming is that, I am trying to build a neural network for self driving car and i am going to use GTA V as a simulator. so while training only GPU will be used but while testing network I will have to run an windowed instance of GTA V with my neural network, so it should be able to handle this load I am from India and my budget is around Rs.60,000 which is around $925 (US). So can you guys please suggest me about what hardware to choose? It would be really helpful for me. Thanks in advance.
-
Hi all I want to install Tensorflow gpu on my PC (Windows 10, Ryzen 1700, GTX 1660 Ti) and for this I need to know what the compute capability is of my GPU. This should be listed here, but it seems like they forgot to list the GTX 1660 Ti. Does anyone know what its compute capability might be? Also I tried this a couple years ago (on a different GPU) and I think that by installing CuDNN, I broke my graphics drivers and I could not game anymore until I deleted CuDNN again. Does anyone have experience with this or can anyone confirm if it's possible to install CuDNN while still being able to game on the same PC? Thanks a lot!!
- 2 replies
-
- cuda
- gtx 1660ti
-
(and 2 more)
Tagged with:
-
I am trying to install tensorflow.js on my windows machine npm install @tensorflow/tfjs-node-gpu whenever I do that I get the following error. I am using python 2.7 and have tried updating npm. Error: node-gyp rebuild failed with: Error: Command failed: node-gyp rebuild gyp ERR! build error gyp ERR! stack Error: `C:\Program Files (x86)\Microsoft Visual Studio\2017\Community\MSBuild\15.0\Bin\MSBuild.exe` failed with exit code: 1 gyp ERR! stack at ChildProcess.onExit (C:\Program Files\nodejs\node_modules\npm\node_modules\node-gyp\lib\build.js:262:23) gyp ERR! stack at ChildProcess.emit (events.js:189:13) gyp ERR! stack at Process.ChildProcess._handle.onexit (internal/child_process.js:248:12) gyp ERR! System Windows_NT 10.0.17134 gyp ERR! command "C:\\Program Files\\nodejs\\node.exe" "C:\\Program Files\\nodejs\\node_modules\\npm\\node_modules\\node-gyp\\bin\\node-gyp.js" "rebuild" gyp ERR! cwd C:\Coding\AI\node_modules\@tensorflow\tfjs-node-gpu gyp ERR! node -v v10.15.1 gyp ERR! node-gyp -v v3.8.0 gyp ERR! not ok
-
Hi everybody, I'm not much of a hardware guy and I'm building a PC (still planning on the components). I will use the rig primarily for a lot of computing and mathematical models and might even run TensorFlow or other similar resource crunching ML or AI tools and I'll also be running a lot of code to test out algorithms and stuff. I will not use the PC however to run graphics or video related software (no Adobe software or rendering software). And since this is not a sponsored project but more of a hobby or personal project, I do not have a money tree in my backyard. My main focus as of right now is the processor. So here it is: I'm preferring AMD, primarily because of the budget, but should I also look to Intel? And, currently my eyes are fixed on the Threadripper 2950X or 2920X. Is this a good idea or should I consider something else? Note: I know I can always upgrade my RAM / Storage, but a processor is something I feel will require a lot of upgrades on other components (especially the motherboard), so I want to get it right. Also, I might at times subject the PC to gaming but I don't want it to be the primary goal.
- 15 replies
-
- cpu
- threadripper
-
(and 3 more)
Tagged with:
-
I Want to Install tensorflow-gpu on my computer. What have I done so far: 1) Installed the latest version of anaconda 2) Added C:\Users\user\Anaconda3 to PATH 3)Created environment for tensorflow with conda create --name tensorflow 4) Activated said environment with activate tensorflow 5) Installed tensorflow-gpu package with conda install tensorflow-gpu 6) Tried to import tensorflow as tf and got this error: Can anybody point me in the right direction?
-
I have made a rudimentary lstm neural-net using Tensorflow which can generate unique melodies using Tensorflow for my compsci (year 13 sixth form) coursework. However this alone isn't enough to score highly. I need advice on how to expand this project as I have hit a wall and I don't know where I can go from here. It can literally be anything, I just need a use-case or demonstration I can make, expanding on my project. My first idea (not original I know) was melody auto-completion, (a user uses a virtual piano to key in some notes and the program finishes it off for you) but my program needs more than a few notes of input to generate anything other than random garbage. I hope you guys have more imagination than me, please help
- 6 replies
-
- tensorflow
- python
- (and 4 more)
-
With Nvidia, can you mix two different graphics card architectures for GPGPU and tensorflow? //Currently AMD graphics card does not support tensorflow-gpu because of opencl.
-
Hello, so I am a fairly novice programmer. I have made an algorithm on TensorFlow that predicts stock market prices and I would like to test it on a live platform. I, however, lack the skills to link my TensorFlow file to a trading broker. I do not have a good understanding of APIs. I am looking for help to implement the machine learning algorithm on a live platform. I will provide you with all the code I have. Any help would be appreciated. Thank you.
-
- programming
- tensorflow
-
(and 2 more)
Tagged with:
-
Hello, I have a razerblade 15 2018 advanced model 1060 maxq. And for local training, I plan to buy external GPU, but my concern is has anyone run tried this configuration and made it work? To be more specific, I plan to run this setup on ubuntu 18.04 on my blade and buy the razer core x, but while searching online, didn't find any proper guides. Has anyone made such a config work? It is a sizeable investment for me. Thanks
- 1 reply
-
- pytorch
- tensorflow
-
(and 1 more)
Tagged with:
-
So, I'll start from the top. It's a personal project which is basically Vehicle and Robot Hybrid. I have 22 motors in total while have voltage range of 5.5 - 12 volts and max 2.2 amps. Appart from that, I'll have to put in PC case fans for cooling. So, 2-3 of those. My question here are : 1) What Arduino and Raspberry Pi do I need in order to make this Robot control all motors? 2) What do I need to run Tensorflow? I already have a Logitech C270 USB webcam for this project. 3) Do I need to learn Python for this project?
- 4 replies
-
- robotic arms
- robotics
- (and 4 more)
-
I retrained an object detection model based on Google's Tensorflow object detection API. I exported it as a frozen inference graph. I would like to use it with CV2's DNN module: cap = cv2.VideoCapture(URL) cvNet = cv2.dnn.readNetFromTensorflow('graph.pb', 'graph.pbtxt') while True: ret, img = cap.read() rows = img.shape[0] cols = img.shape[1] cvNet.setInput(cv2.dnn.blobFromImage(img, 1.0/127.5, (300, 300), (127.5, 127.5, 127.5), swapRB=True, crop=False)) cvOut = cv2Net.forward() for detection in cv2Out[0,0,:,:]: score = float(detection[2]) if score > 0.3: left = detection[3] * cols top = detection[4] * rows right = detection[5] * cols bottom = detection[6] * rows cv2.rectangle(img, (int(left), int(top)), (int(right), int(bottom)), (23, 230, 210), thickness=2) cv2.imshow('img', img) if cv2.waitKey(1) ==27: exit(0) I get this error: Const input blob for weights not found in function getConstBlob From my research, I believe I have to optimize the inference graph. I can't find any documentation as how to do this. If anyone could point me in the right direction, it would be very much appreciated.
-
Hi, I want to get started on supervised machine learning using neural networks. I know a fair bit about how neural networks work. I also have some coding experience (mainly C++, C#, python). Building my own basic neural network in C++ would be tedious, but would really make me understand everything in depth. The other option I consider is getting started using the tensorflow library for python. This would probably be more fun because the first good results can be reached way more quickly. Which option is better? My second problem is that I am looking for an interesting, yet not too hard, little project to start with. Any suggestions? regards, Xilef
- 5 replies
-
- ai
- neuralnetwork
-
(and 2 more)
Tagged with:
-
I’m looking for a cheap affordable solution for a large storage. We are trying to use tensorflow to analyze pictures and this would be a storage server for the cameras that are collecting our data. So I don’t need anything crazy fast for this machine, since I’ll need to physically move each drive between locations for collecting data and analysis. So we plan on only collecting photos with this and then ship the drives for further processing. Where we are collecting the data is remote and cell service / internet is not an option. My my thoughts have been to use a dell r710 since it seems like people are having a lot of success with them even though the are getting old. I’m just wondering if other people have found better solutions to hold more high capacity drives. The more drives the less often I’ll have to collect the data, but I’m also trying to keep as cheap as possible. Thanks for the suggestions. Ps. Love the tech tips on youtube!
-
I'm not sure whether this is the place to post this....but here it goes: Would it be possible to train a tensorflow network on wikipedia articles (a bit like Watson)? If so, how would I do this?
- 5 replies
-
- tensorflow
- neural network
-
(and 1 more)
Tagged with:
-
So, I need a laptop that is able to perform Deep Learning on it, via TensorFlow. I already have a laptop that has 2GB of dedicated graphics but when I start to train a model on TensorFlow via python, I always end up with a "Resource Allocation error", meaning 2GB of nVidia Graphics is not enough. I tried lowering the Training data size (batch_size) and still no luck. So I browsed online solutions that resulted in a requirement of higher Graphics memory. So I decided to buy a new powerful laptop that supports TensorFlow and Deep Learning algorithms in python, especially training. I chose the MSI GL62M as of now but I am unsure whether it will be able to support this. If anyone can suggest to me or tell me whether the laptop is compatible or if a better one is available.
-
Hello and sorry in advance if I posted this in a wrong part of the forum - I'm new here. I wanted to suggest to the LTT team to start doing machine learning benchmarks and wanted to provide them with a relatively simple way to do it. I am a physicist and doing particle physics research is almost entirely based on machine learning nowadays. Seeing some benchmarks of new GPUs and CPUs would be rather beneficial - knowing stuff like whether a much cheaper 1080Ti is just as fast as a K80 is definitely useful to many people in that niche - all you nerds screaming now about the bonus of double precision I know it is important but not for all research. I wanted to propose to start benchmarking using TensorFlow from Google, mostly because of the ease of use and automated threading - trust me, you don't want to use CERN's proprietary ROOT, it's ugly and outdated by around 20 years. Below I link a project on my GitHub which uses MNIST (handwritten digits) examples passed through a Convolutional Neural Network for classifying them - extremely simple computer vision if you'd like. I also added almost full support for TensorBoard results visualisation software which is rather marvellous though its small precision on time measurements may make you want to add a snipett of code to measure it yourself (or make many more epochs than this example - easily tweakable - and divide by total time shown and live with the slight error). Link: https://github.com/krisfur/TensorBoard-CNN-Visualization-Example For benchmarking CPUs I'd suggest using the not GPU-accelerated version of TensorFlow. For benchmarking GPUs naturally install the GPU accelerated version, but remember that if installation of that has problems you can overcome some of them by disabling the integrated GPU of your CPU - definitely need to do that on laptops and we are reaching a point where laptops could actually do some basic machine learning stuff. 
Also you should run it with at least 16GB of ram as the final evaluation basically loads the whole thing onto the memory for evaluation. More ram isn't a problem for you guys anyway. I will attach some screenshots of how nice the TensorBoard output looks. I'd love to provide something different than the usual MNIST example but I'm not in possession of some good practice data that I would be allowed to share and can't be bothered to make some mock data as you may still never use it. Sidenotes: Benchmarking based on final accuracy isn't always the best choice, there's a lot of randomness involved - you should only care about the time. You could save a trained model and just run test batches through it if you want to do that, but that's a bit more work than I have time to describe here - not necessarily hard, but you wouldn't be able to get a lot of data about training speed if you don't do the training (duh!). MNIST images are tiny so training times are short, meaning you can benchmark a lot quickly, with bigger images you'd need to do some tweaking to the CNN and it would take a long time. I remember doing computer vision on ~1500x1500px images and it literally took days on a system with two K80s and we did have to introduce a downsampling mechanism. Also using MNIST data means you just download them every time and delete afterwards, so do have an internet connection. Using your own images requires making a way of reading and batching them which is a day's work. If you do end up using this could you please mention me in a video, my mates would die haha. If not it's fine anyway. Cheers from London.
-
- machine learning
- benchmarks
-
(and 2 more)
Tagged with: