diff --git a/docs/detectnet-camera-2.md b/docs/detectnet-camera-2.md index 04706742a..9ec17e45b 100644 --- a/docs/detectnet-camera-2.md +++ b/docs/detectnet-camera-2.md @@ -7,8 +7,8 @@ Up next we have a realtime object detection camera demo available for C++ and Python: -- [`detectnet-camera.cpp`](../examples/detectnet-camera/detectnet-camera.cpp) (C++) -- [`detectnet-camera.py`](../python/examples/detectnet-camera.py) (Python) +- [`detectnet-camera.cpp`](../examples/detectnet-camera/detectnet-camera.cpp) (C++) +- [`detectnet-camera.py`](../python/examples/detectnet-camera.py) (Python) Similar to the previous [`detectnet-console`](detectnet-console-2.md) example, these camera applications use detection networks, except that they process a live video feed from a camera. `detectnet-camera` accepts various **optional** command-line parameters, including: @@ -23,11 +23,12 @@ Similar to the previous [`detectnet-console`](detectnet-console-2.md) example, t - The default is to use MIPI CSI sensor 0 (`--camera=0`) - `--width` and `--height` flags setting the camera resolution (default is `1280x720`) - The resolution should be set to a format that the camera supports. - - Query the available formats with the following commands: + - Query the available formats with the following commands: ``` bash $ sudo apt-get install v4l-utils $ v4l2-ctl --list-formats-ext ``` +- `--fps` flag setting the camera fps (default is `30`) You can combine the usage of these flags as needed, and there are additional command line parameters available for loading custom models. Launch the application with the `--help` flag to recieve more info, or see the [`Examples`](../README.md#code-examples) readme. 
diff --git a/docs/detectnet-camera.md b/docs/detectnet-camera.md index ef10bd791..037eccc5e 100644 --- a/docs/detectnet-camera.md +++ b/docs/detectnet-camera.md @@ -7,8 +7,8 @@ Up next we have a realtime object detection camera demo available for C++ and Python: -- [`detectnet-camera.cpp`](../examples/detectnet-camera/detectnet-camera.cpp) (C++) -- [`detectnet-camera.py`](../python/examples/detectnet-camera.py) (Python) +- [`detectnet-camera.cpp`](../examples/detectnet-camera/detectnet-camera.cpp) (C++) +- [`detectnet-camera.py`](../python/examples/detectnet-camera.py) (Python) Similar to the previous [`detectnet-console`](detectnet-console.md) example, these camera applications use detection networks, except that they process a live video feed from a camera. `detectnet-camera` accepts 4 optional command-line parameters: @@ -20,11 +20,12 @@ Similar to the previous [`detectnet-console`](detectnet-console.md) example, the - The default is to use MIPI CSI sensor 0 (`--camera=0`) - `--width` and `--height` flags setting the camera resolution (default is `1280x720`) - The resolution should be set to a format that the camera supports. - - Query the available formats with the following commands: + - Query the available formats with the following commands: ``` bash $ sudo apt-get install v4l-utils $ v4l2-ctl --list-formats-ext ``` +- `--fps` flag setting the camera fps (default is `30`) You can combine the usage of these flags as needed, and there are additional command line parameters available for loading custom models. Launch the application with the `--help` flag to recieve more info, or see the [`Examples`](../README.md#code-examples) readme. 
diff --git a/docs/detectnet-example-2.md b/docs/detectnet-example-2.md index 0f8626873..c92f38ea3 100644 --- a/docs/detectnet-example-2.md +++ b/docs/detectnet-example-2.md @@ -22,8 +22,8 @@ import jetson.inference import jetson.utils ``` -> **note**: these Jetson modules are installed during the `sudo make install` step of [building the repo](building-repo-2.md#compiling-the-project). ->           if you did not run `sudo make install`, then these packages won't be found when the example is run. +> **note**: these Jetson modules are installed during the `sudo make install` step of [building the repo](building-repo-2.md#compiling-the-project). +>           if you did not run `sudo make install`, then these packages won't be found when the example is run. #### Loading the Detection Model @@ -41,21 +41,21 @@ Note that you can change the model string to one of the values from [this table] To connect to the camera device for streaming, we'll create an instance of the [`gstCamera`](https://rawgit.com/dusty-nv/jetson-inference/pytorch/docs/html/python/jetson.utils.html#gstCamera) object: ``` python -camera = jetson.utils.gstCamera(1280, 720, "/dev/video0") # using V4L2 +camera = jetson.utils.gstCamera(1280, 720, 30, "/dev/video0") # using V4L2 ``` -It's constructor accepts 3 parameters - the desired width, height, and video device to use. Substitute the following snippet depending on if you are using a MIPI CSI camera or a V4L2 USB camera, along with the preferred resolution: +It's constructor accepts 4 parameters - the desired width, height, fps and video device to use. Substitute the following snippet depending on if you are using a MIPI CSI camera or a V4L2 USB camera, along with the preferred resolution: -- MIPI CSI cameras are used by specifying the sensor index (`"0"` or `"1"`, ect.) +- MIPI CSI cameras are used by specifying the sensor index (`"0"` or `"1"`, ect.) 
``` python - camera = jetson.utils.gstCamera(1280, 720, "0") + camera = jetson.utils.gstCamera(1280, 720, 30, "0") ``` -- V4L2 USB cameras are used by specifying their `/dev/video` node (`"/dev/video0"`, `"/dev/video1"`, ect.) +- V4L2 USB cameras are used by specifying their `/dev/video` node (`"/dev/video0"`, `"/dev/video1"`, ect.) ``` python - camera = jetson.utils.gstCamera(1280, 720, "/dev/video0") + camera = jetson.utils.gstCamera(1280, 720, 30, "/dev/video0") ``` - The width and height should be a resolution that the camera supports. - - Query the available resolutions with the following commands: + - Query the available resolutions with the following commands: ``` bash $ sudo apt-get install v4l-utils $ v4l2-ctl --list-formats-ext diff --git a/docs/imagenet-camera-2.md b/docs/imagenet-camera-2.md index 63bd3f34f..57d07d446 100644 --- a/docs/imagenet-camera-2.md +++ b/docs/imagenet-camera-2.md @@ -1,14 +1,14 @@

Back | Next | Contents
-Image Recognition

+Image Recognition

# Running the Live Camera Recognition Demo Next we have a realtime image recognition camera demo available for C++ and Python: -- [`imagenet-camera.cpp`](../examples/imagenet-camera/imagenet-camera.cpp) (C++) -- [`imagenet-camera.py`](../python/examples/imagenet-camera.py) (Python) +- [`imagenet-camera.cpp`](../examples/imagenet-camera/imagenet-camera.cpp) (C++) +- [`imagenet-camera.py`](../python/examples/imagenet-camera.py) (Python) Similar to the previous [`imagenet-console`](imagenet-console-2.md) example, the camera applications are built to the `/aarch64/bin` directory. They run on a live camera stream with OpenGL rendering and accept 4 optional command-line arguments: @@ -20,11 +20,12 @@ Similar to the previous [`imagenet-console`](imagenet-console-2.md) example, the - The default is to use MIPI CSI sensor 0 (`--camera=0`) - `--width` and `--height` flags setting the camera resolution (default is `1280x720`) - The resolution should be set to a format that the camera supports. - - Query the available formats with the following commands: + - Query the available formats with the following commands: ``` bash $ sudo apt-get install v4l-utils $ v4l2-ctl --list-formats-ext ``` +- `--fps` flag setting the camera fps (default is `30`) You can combine the usage of these flags as needed, and there are additional command line parameters available for loading custom models. Launch the application with the `--help` flag to recieve more info, or see the [`Examples`](../README.md#code-examples) readme. diff --git a/docs/imagenet-camera.md b/docs/imagenet-camera.md index 1d9d1d04c..1269ef2a0 100644 --- a/docs/imagenet-camera.md +++ b/docs/imagenet-camera.md @@ -1,14 +1,14 @@

Back | Next | Contents
-Image Recognition

+Image Recognition

# Running the Live Camera Recognition Demo Next we have a realtime image recognition camera demo available for C++ and Python: -- [`imagenet-camera.cpp`](../examples/imagenet-camera/imagenet-camera.cpp) (C++) -- [`imagenet-camera.py`](../python/examples/imagenet-camera.py) (Python) +- [`imagenet-camera.cpp`](../examples/imagenet-camera/imagenet-camera.cpp) (C++) +- [`imagenet-camera.py`](../python/examples/imagenet-camera.py) (Python) Similar to the previous [`imagenet-console`](imagenet-console.md) example, the camera applications are built to the `/aarch64/bin` directory. They run on a live camera stream with OpenGL rendering and accept 4 optional command-line arguments: @@ -20,11 +20,12 @@ Similar to the previous [`imagenet-console`](imagenet-console.md) example, the c - The default is to use MIPI CSI sensor 0 (`--camera=0`) - `--width` and `--height` flags setting the camera resolution (default is `1280x720`) - The resolution should be set to a format that the camera supports. - - Query the available formats with the following commands: + - Query the available formats with the following commands: ``` bash $ sudo apt-get install v4l-utils $ v4l2-ctl --list-formats-ext ``` +- `--fps` flag setting the camera fps (default is `30`) You can combine the usage of these flags as needed, and there are additional command line parameters available for loading custom models. Launch the application with the `--help` flag to recieve more info, or see the [`Examples`](../README.md#code-examples) readme. diff --git a/docs/pytorch-collect.md b/docs/pytorch-collect.md index 39a0bce02..5cf5a5626 100644 --- a/docs/pytorch-collect.md +++ b/docs/pytorch-collect.md @@ -71,7 +71,7 @@ Next, we'll cover the command-line options for starting the tool. 
## Launching the Tool -The source for the `camera-capture` tool can be found under [`jetson-inference/tools/camera-capture/`](../tools/camera-capture), and like the other programs from the repo it gets built to the `aarch64/bin` directory and installed under `/usr/local/bin/` +The source for the `camera-capture` tool can be found under [`jetson-inference/tools/camera-capture/`](../tools/camera-capture), and like the other programs from the repo it gets built to the `aarch64/bin` directory and installed under `/usr/local/bin/` The `camera-capture` tool accepts 3 optional command-line arguments: @@ -81,11 +81,12 @@ The `camera-capture` tool accepts 3 optional command-line arguments: - The default is to use MIPI CSI sensor 0 (`--camera=0`) - `--width` and `--height` flags setting the camera resolution (default is `1280x720`) - The resolution should be set to a format that the camera supports. - - Query the available formats with the following commands: + - Query the available formats with the following commands: ``` bash $ sudo apt-get install v4l-utils $ v4l2-ctl --list-formats-ext ``` +- `--fps` flag setting the camera fps (default is `30`) Below are some example commands for launching the tool: @@ -106,7 +107,7 @@ Below is the `Data Capture Control` window, which allows you to pick the desired -First, open the dataset path and class labels. The tool will then create the dataset structure discussed above (unless these subdirectories already exist), and you will see your object labels populated inside the `Current Class` drop-down. +First, open the dataset path and class labels. The tool will then create the dataset structure discussed above (unless these subdirectories already exist), and you will see your object labels populated inside the `Current Class` drop-down. Then position the camera at the object or scene you have currently selected in the drop-down, and click the `Capture` button (or press the spacebar) when you're ready to take an image. 
The images will be saved under that class subdirectory in the train, val, or test set. The status bar displays how many images have been saved under that category. @@ -162,7 +163,7 @@ Next we encourage you to experiment and apply what you've learned to other proje * use GPIO to trigger external actuators or LEDs when an object is detected * an autonomous robot that can find or follow an object -* a handheld battery-powered camera + Jetson + mini-display +* a handheld battery-powered camera + Jetson + mini-display * an interactive toy or treat dispenser for your pet * a smart doorbell camera that greets your guests diff --git a/docs/segnet-camera-2.md b/docs/segnet-camera-2.md index acc397cb8..da1d7b69c 100644 --- a/docs/segnet-camera-2.md +++ b/docs/segnet-camera-2.md @@ -7,7 +7,7 @@ Next we'll run realtime semantic segmentation on a live camera feed, available for C++ and Python: - [`segnet-camera.cpp`](../examples/segnet-camera/segnet-camera.cpp) (C++) -- [`segnet-camera.py`](../python/examples/segnet-camera.py) (Python) +- [`segnet-camera.py`](../python/examples/segnet-camera.py) (Python) Similar to the previous [`segnet-console`](segnet-console-2.md) example, these camera applications use segmentation networks, except that they process a live video feed instead. `segnet-camera` accepts various **optional** command-line parameters, including: @@ -20,11 +20,12 @@ Similar to the previous [`segnet-console`](segnet-console-2.md) example, these c - The default is to use MIPI CSI sensor 0 (`--camera=0`) - `--width` and `--height` flags setting the camera resolution (default is `1280x720`) - The resolution should be set to a format that the camera supports. 
- - Query the available formats with the following commands: + - Query the available formats with the following commands: ``` bash $ sudo apt-get install v4l-utils $ v4l2-ctl --list-formats-ext ``` +- `--fps` flag setting the camera fps (default is `30`) You can combine the usage of these flags as needed, and there are additional command line parameters available for loading custom models. Launch the application with the `--help` flag to recieve more info, or see the [`Examples`](../README.md#code-examples) readme. Below are some typical scenarios for launching the program - see [this table](segnet-console-2.md#pre-trained-segmentation-models-available) for the models available to use. diff --git a/examples/detectnet-camera/detectnet-camera.cpp b/examples/detectnet-camera/detectnet-camera.cpp index 65f8902cd..147dc315a 100644 --- a/examples/detectnet-camera/detectnet-camera.cpp +++ b/examples/detectnet-camera/detectnet-camera.cpp @@ -56,6 +56,7 @@ int usage() printf(" by default, MIPI CSI camera 0 will be used.\n"); printf(" --width WIDTH desired width of camera stream (default is 1280 pixels)\n"); printf(" --height HEIGHT desired height of camera stream (default is 720 pixels)\n"); + printf(" --fps FPS desired FPS of camera stream (default is 30)\n"); printf(" --threshold VALUE minimum threshold for detection (default is 0.5)\n\n"); printf("%s\n", detectNet::Usage()); @@ -86,6 +87,7 @@ int main( int argc, char** argv ) */ gstCamera* camera = gstCamera::Create(cmdLine.GetInt("width", gstCamera::DefaultWidth), cmdLine.GetInt("height", gstCamera::DefaultHeight), + cmdLine.GetInt("fps", gstCamera::DefaultFps), cmdLine.GetString("camera")); if( !camera ) @@ -93,18 +95,19 @@ int main( int argc, char** argv ) printf("\ndetectnet-camera: failed to initialize camera device\n"); return 0; } - + printf("\ndetectnet-camera: successfully initialized camera device\n"); printf(" width: %u\n", camera->GetWidth()); printf(" height: %u\n", camera->GetHeight()); + printf(" fps: 
%u\n", camera->GetFps()); printf(" depth: %u (bpp)\n\n", camera->GetPixelDepth()); - + /* * create detection network */ detectNet* net = detectNet::Create(argc, argv); - + if( !net ) { printf("detectnet-camera: failed to load detectNet model\n"); @@ -113,14 +116,14 @@ int main( int argc, char** argv ) // parse overlay flags const uint32_t overlayFlags = detectNet::OverlayFlagsFromStr(cmdLine.GetString("overlay", "box,labels,conf")); - + /* * create openGL window */ glDisplay* display = glDisplay::Create(); - if( !display ) + if( !display ) printf("detectnet-camera: failed to create openGL display\n"); @@ -132,38 +135,38 @@ int main( int argc, char** argv ) printf("detectnet-camera: failed to open camera for streaming\n"); return 0; } - + printf("detectnet-camera: camera open for streaming\n"); - - + + /* * processing loop */ float confidence = 0.0f; - + while( !signal_recieved ) { // capture RGBA image float* imgRGBA = NULL; - + if( !camera->CaptureRGBA(&imgRGBA, 1000) ) printf("detectnet-camera: failed to capture RGBA image from camera\n"); // detect objects in the frame detectNet::Detection* detections = NULL; - + const int numDetections = net->Detect(imgRGBA, camera->GetWidth(), camera->GetHeight(), &detections, overlayFlags); - + if( numDetections > 0 ) { printf("%i objects detected\n", numDetections); - + for( int n=0; n < numDetections; n++ ) { printf("detected obj %i class #%u (%s) confidence=%f\n", n, detections[n].ClassID, net->GetClassDesc(detections[n].ClassID), detections[n].Confidence); - printf("bounding box %i (%f, %f) (%f, %f) w=%f h=%f\n", n, detections[n].Left, detections[n].Top, detections[n].Right, detections[n].Bottom, detections[n].Width(), detections[n].Height()); + printf("bounding box %i (%f, %f) (%f, %f) w=%f h=%f\n", n, detections[n].Left, detections[n].Top, detections[n].Right, detections[n].Bottom, detections[n].Width(), detections[n].Height()); } - } + } // update display if( display != NULL ) @@ -184,13 +187,13 @@ int main( int argc, 
char** argv ) // print out timing info net->PrintProfilerTimes(); } - + /* * destroy resources */ printf("detectnet-camera: shutting down...\n"); - + SAFE_DELETE(camera); SAFE_DELETE(display); SAFE_DELETE(net); diff --git a/examples/homography-camera/homography-camera.cpp b/examples/homography-camera/homography-camera.cpp index 54b3a7912..7dc0b8310 100644 --- a/examples/homography-camera/homography-camera.cpp +++ b/examples/homography-camera/homography-camera.cpp @@ -61,6 +61,7 @@ int main( int argc, char** argv ) */ gstCamera* camera = gstCamera::Create(cmdLine.GetInt("width", gstCamera::DefaultWidth), cmdLine.GetInt("height", gstCamera::DefaultHeight), + cmdLine.GetInt("fps", gstCamera::DefaultFps), cmdLine.GetString("camera")); if( !camera ) @@ -68,21 +69,23 @@ int main( int argc, char** argv ) printf("\nhomography-camera: failed to initialize camera device\n"); return 0; } - + const uint32_t imgWidth = camera->GetWidth(); const uint32_t imgHeight = camera->GetHeight(); + const uint32_t fps = camera->GetFps(); printf("\nhomography-camera: successfully initialized camera device\n"); printf(" width: %u\n", imgWidth); printf(" height: %u\n", imgHeight); + printf(" fps: %u\n", fps); printf(" depth: %u (bpp)\n\n", camera->GetPixelDepth()); - + /* * create homography network */ homographyNet* net = homographyNet::Create(argc, argv); - + if( !net ) { printf("homography-camera: failed to initialize homographyNet\n"); @@ -107,10 +110,10 @@ int main( int argc, char** argv ) * create openGL window */ glDisplay* display = glDisplay::Create(); - + if( !display ) printf("homography-camera: failed to create openGL display\n"); - + /* * start streaming @@ -120,18 +123,18 @@ int main( int argc, char** argv ) printf("homography-camera: failed to open camera for streaming\n"); return 0; } - + printf("homography-camera: camera open for streaming\n"); - - + + /* * stabilize the camera video */ float* lastImg = NULL; - float displacementAvg[] = {0,0,0,0,0,0,0,0}; // average the 
camera displacement over a series of frames + float displacementAvg[] = {0,0,0,0,0,0,0,0}; // average the camera displacement over a series of frames const float displacementAvgFactor = 1.0f; // to smooth it out over time (factor of 1.0 = instant) - + while( !signal_recieved ) { // capture RGBA image @@ -193,18 +196,18 @@ int main( int argc, char** argv ) // check if the user quit if( display->IsClosed() ) - signal_recieved = true; + signal_recieved = true; } lastImg = imgRGBA; } - - + + /* * destroy resources */ printf("homography-camera: shutting down...\n"); - + SAFE_DELETE(camera); SAFE_DELETE(display); SAFE_DELETE(net); diff --git a/examples/imagenet-camera/imagenet-camera.cpp b/examples/imagenet-camera/imagenet-camera.cpp index 0de940ce8..264bfc366 100644 --- a/examples/imagenet-camera/imagenet-camera.cpp +++ b/examples/imagenet-camera/imagenet-camera.cpp @@ -54,6 +54,7 @@ int usage() printf(" by default, MIPI CSI camera 0 will be used.\n"); printf(" --width WIDTH desired width of camera stream (default is 1280 pixels)\n"); printf(" --height HEIGHT desired height of camera stream (default is 720 pixels)\n\n"); + printf(" --fps FPS desired FPS of camera stream (default is 30)\n\n"); printf(" %s\n", imageNet::Usage()); return 0; @@ -69,7 +70,7 @@ int main( int argc, char** argv ) if( cmdLine.GetFlag("help") ) return usage(); - + /* * attach signal handler */ @@ -82,25 +83,27 @@ int main( int argc, char** argv ) */ gstCamera* camera = gstCamera::Create(cmdLine.GetInt("width", gstCamera::DefaultWidth), cmdLine.GetInt("height", gstCamera::DefaultHeight), + cmdLine.GetInt("fps", gstCamera::DefaultFps), cmdLine.GetString("camera")); - + if( !camera ) { printf("\nimagenet-camera: failed to initialize camera device\n"); return 0; } - + printf("\nimagenet-camera: successfully initialized camera device\n"); printf(" width: %u\n", camera->GetWidth()); printf(" height: %u\n", camera->GetHeight()); + printf(" fps: %u\n", camera->GetFps()); printf(" depth: %u 
(bpp)\n\n", camera->GetPixelDepth()); - + /* * create recognition network */ imageNet* net = imageNet::Create(argc, argv); - + if( !net ) { printf("imagenet-console: failed to initialize imageNet\n"); @@ -113,7 +116,7 @@ int main( int argc, char** argv ) */ glDisplay* display = glDisplay::Create(); cudaFont* font = cudaFont::Create(); - + /* * start streaming @@ -123,39 +126,39 @@ int main( int argc, char** argv ) printf("\nimagenet-camera: failed to open camera for streaming\n"); return 0; } - + printf("\nimagenet-camera: camera open for streaming\n"); - - + + /* * processing loop */ float confidence = 0.0f; - + while( !signal_recieved ) { float* imgRGBA = NULL; - + // get the latest frame if( !camera->CaptureRGBA(&imgRGBA, 1000) ) printf("\nimagenet-camera: failed to capture frame\n"); // classify image const int img_class = net->Classify(imgRGBA, camera->GetWidth(), camera->GetHeight(), &confidence); - + if( img_class >= 0 ) { - printf("imagenet-camera: %2.5f%% class #%i (%s)\n", confidence * 100.0f, img_class, net->GetClassDesc(img_class)); + printf("imagenet-camera: %2.5f%% class #%i (%s)\n", confidence * 100.0f, img_class, net->GetClassDesc(img_class)); if( font != NULL ) { char str[256]; sprintf(str, "%05.2f%% %s", confidence * 100.0f, net->GetClassDesc(img_class)); - + font->OverlayText((float4*)imgRGBA, camera->GetWidth(), camera->GetHeight(), str, 5, 5, make_float4(255, 255, 255, 255), make_float4(0, 0, 0, 100)); } - } + } // update display if( display != NULL ) @@ -165,7 +168,7 @@ int main( int argc, char** argv ) // update status bar char str[256]; sprintf(str, "TensorRT %i.%i.%i | %s | %s | Network %.0f FPS", NV_TENSORRT_MAJOR, NV_TENSORRT_MINOR, NV_TENSORRT_PATCH, net->GetNetworkName(), precisionTypeToStr(net->GetPrecision()), net->GetNetworkFPS()); - display->SetTitle(str); + display->SetTitle(str); // check if the user quit if( display->IsClosed() ) @@ -174,17 +177,17 @@ int main( int argc, char** argv ) net->PrintProfilerTimes(); } - - + + /* * 
destroy resources */ printf("imagenet-camera: shutting down...\n"); - + SAFE_DELETE(camera); SAFE_DELETE(display); SAFE_DELETE(net); - + printf("imagenet-camera: shutdown complete.\n"); return 0; } diff --git a/examples/segnet-camera/segnet-camera.cpp b/examples/segnet-camera/segnet-camera.cpp index 754d299d9..6c6fd62ff 100644 --- a/examples/segnet-camera/segnet-camera.cpp +++ b/examples/segnet-camera/segnet-camera.cpp @@ -57,6 +57,7 @@ int usage() printf(" by default, MIPI CSI camera 0 will be used.\n"); printf(" --width WIDTH desired width of camera stream (default: 1280 pixels)\n"); printf(" --height HEIGHT desired height of camera stream (default: 720 pixels)\n"); + printf(" --fps FPS desired fps of camera stream (default: 30)\n"); printf(" --alpha ALPHA overlay alpha blending value, range 0-255 (default: 120)\n"); printf(" --filter-mode MODE filtering mode used during visualization,\n"); printf(" options are 'point' or 'linear' (default: 'linear')\n"); @@ -77,7 +78,7 @@ int main( int argc, char** argv ) if( cmdLine.GetFlag("help") ) return usage(); - + /* * attach signal handler */ @@ -90,6 +91,7 @@ int main( int argc, char** argv ) */ gstCamera* camera = gstCamera::Create(cmdLine.GetInt("width", gstCamera::DefaultWidth), cmdLine.GetInt("height", gstCamera::DefaultHeight), + cmdLine.GetInt("fps", gstCamera::DefaultFps), cmdLine.GetString("camera")); if( !camera ) @@ -97,28 +99,30 @@ int main( int argc, char** argv ) printf("\nsegnet-camera: failed to initialize camera device\n"); return 0; } - + const uint32_t width = camera->GetWidth(); const uint32_t height = camera->GetHeight(); + const uint32_t fps = camera->GetFps(); printf("\nsegnet-camera: successfully initialized camera device\n"); printf(" width: %u\n", width); printf(" height: %u\n", height); + printf(" fps: %u\n", fps); printf(" depth: %u (bpp)\n\n", camera->GetPixelDepth()); - + /* * create segmentation network */ segNet* net = segNet::Create(argc, argv); - + if( !net ) { printf("segnet-camera: 
failed to initialize imageNet\n"); return 0; } - // set alpha blending value for classes that don't explicitly already have an alpha + // set alpha blending value for classes that don't explicitly already have an alpha net->SetOverlayAlpha(cmdLine.GetFloat("alpha", 120.0f)); // get the desired alpha blend filtering mode @@ -127,7 +131,7 @@ int main( int argc, char** argv ) // get the object class to ignore (if any) const char* ignoreClass = cmdLine.GetString("ignore-class", "void"); - + /* * allocate segmentation overlay output buffers */ @@ -145,16 +149,16 @@ int main( int argc, char** argv ) printf("segnet-camera: failed to allocate CUDA memory for mask image\n"); return 0; } - + /* * create openGL window */ glDisplay* display = glDisplay::Create(); - + if( !display ) printf("segnet-camera: failed to create openGL display\n"); - + /* * start streaming @@ -164,20 +168,20 @@ int main( int argc, char** argv ) printf("segnet-camera: failed to open camera for streaming\n"); return 0; } - + printf("segnet-camera: camera open for streaming\n"); - - + + /* * processing loop */ float confidence = 0.0f; - + while( !signal_recieved ) { // capture RGBA image float* imgRGBA = NULL; - + if( !camera->CaptureRGBA(&imgRGBA, 1000, true) ) printf("segnet-camera: failed to convert from NV12 to RGBA\n"); @@ -187,7 +191,7 @@ int main( int argc, char** argv ) printf("segnet-console: failed to process segmentation\n"); continue; } - + // generate overlay if( !net->Overlay(imgOverlay, width, height, filterMode) ) { @@ -201,7 +205,7 @@ int main( int argc, char** argv ) printf("segnet-console: failed to process segmentation mask.\n"); continue; } - + // update display if( display != NULL ) { @@ -225,19 +229,19 @@ int main( int argc, char** argv ) signal_recieved = true; } - // wait for the GPU to finish + // wait for the GPU to finish CUDA(cudaDeviceSynchronize()); // print out timing info net->PrintProfilerTimes(); } - + /* * destroy resources */ printf("segnet-camera: shutting 
down...\n"); - + SAFE_DELETE(camera); SAFE_DELETE(display); SAFE_DELETE(net); diff --git a/tools/camera-capture b/tools/camera-capture index 2499bf422..313b35d7a 160000 --- a/tools/camera-capture +++ b/tools/camera-capture @@ -1 +1 @@ -Subproject commit 2499bf4220276a2be90c2d204d781d4836b26493 +Subproject commit 313b35d7a12d83d2be0aa7f079eeb691c5e35ed3 diff --git a/utils b/utils index 798c416c1..941cfc076 160000 --- a/utils +++ b/utils @@ -1 +1 @@ -Subproject commit 798c416c175d509571859c9290257bd5cce1fd63 +Subproject commit 941cfc0762d7199a2d9165a58d34d139660149a9