How to Build a Flying Robot

Summarizing the technologies needed to build robots, with occasional thoughts on the future of robotics.

Studying Open3D (Part 3)

Goal

I want to freely manipulate data acquired with a depth sensor using Open3D.

f:id:robonchu:20180224200259p:plain

Textbook

Open3D: A Modern Library for 3D Data Processing — Open3D 0.1 dev documentation

Colored point cloud registration

With point-to-point ICP the geometric planes line up, but patterns on those planes (e.g. the triangle texture) stay misaligned.

Colored point cloud registration, however, also uses color information, so it can fit based on such patterns as well.

Colored Point Cloud Registration Revisited, ICCV 2017

An implementation of this paper is shown below 👇

  1. Downsample with a voxel size.

  2. Estimate normal.

  3. Applying colored point cloud registration.

# src/Python/Tutorial/Advanced/colored_pointcloud_registration.py

import sys
import numpy as np
import copy
sys.path.append("../..")
from py3d import *


def draw_registration_result_original_color(source, target, transformation):
    source_temp = copy.deepcopy(source)
    source_temp.transform(transformation)
    draw_geometries([source_temp, target])


if __name__ == "__main__":

    print("1. Load two point clouds and show initial pose")
    source = read_point_cloud("../../TestData/ColoredICP/frag_115.ply")
    target = read_point_cloud("../../TestData/ColoredICP/frag_116.ply")

    # draw initial alignment
    current_transformation = np.identity(4)
    draw_registration_result_original_color(
            source, target, current_transformation)

    # point to plane ICP
    current_transformation = np.identity(4);
    print("2. Point-to-plane ICP registration is applied on original point")
    print("   clouds to refine the alignment. Distance threshold 0.02.")
    result_icp = registration_icp(source, target, 0.02,
            current_transformation, TransformationEstimationPointToPlane())
    print(result_icp)
    draw_registration_result_original_color(
            source, target, result_icp.transformation)

    # colored pointcloud registration
    # This is implementation of following paper
    # J. Park, Q.-Y. Zhou, V. Koltun,
    # Colored Point Cloud Registration Revisited, ICCV 2017
    voxel_radius = [ 0.04, 0.02, 0.01 ];
    max_iter = [ 50, 30, 14 ];
    current_transformation = np.identity(4)
    print("3. Colored point cloud registration")
    for scale in range(3):
        iter = max_iter[scale]
        radius = voxel_radius[scale]
        print([iter,radius,scale])

        print("3-1. Downsample with a voxel size %.2f" % radius)
        source_down = voxel_down_sample(source, radius)
        target_down = voxel_down_sample(target, radius)

        print("3-2. Estimate normal.")
        estimate_normals(source_down, KDTreeSearchParamHybrid(
                radius = radius * 2, max_nn = 30))
        estimate_normals(target_down, KDTreeSearchParamHybrid(
                radius = radius * 2, max_nn = 30))

        print("3-3. Applying colored point cloud registration")
        result_icp = registration_colored_icp(source_down, target_down,
                radius, current_transformation,
                ICPConvergenceCriteria(relative_fitness = 1e-6,
                relative_rmse = 1e-6, max_iteration = iter))
        current_transformation = result_icp.transformation
        print(result_icp)
    draw_registration_result_original_color(
            source, target, result_icp.transformation)

Before f:id:robonchu:20180225182439p:plain

After f:id:robonchu:20180225182458p:plain

Global registration

  • The ICP variants so far are local registration methods that depend on an initial alignment.

  • Here we introduce the other class of methods: global registration.

  • Its result is then used as the initial alignment for the local ICP methods above.

# src/Python/Tutorial/Advanced/global_registration.py

import sys
sys.path.append("../..")
from py3d import *
import numpy as np
import copy

def draw_registration_result(source, target, transformation):
    source_temp = copy.deepcopy(source)
    target_temp = copy.deepcopy(target)
    source_temp.paint_uniform_color([1, 0.706, 0])
    target_temp.paint_uniform_color([0, 0.651, 0.929])
    source_temp.transform(transformation)
    draw_geometries([source_temp, target_temp])

if __name__ == "__main__":

    print("1. Load two point clouds and disturb initial pose.")
    source = read_point_cloud("../../TestData/ICP/cloud_bin_0.pcd")
    target = read_point_cloud("../../TestData/ICP/cloud_bin_1.pcd")
    trans_init = np.asarray([[0.0, 1.0, 0.0, 0.0],
                            [1.0, 0.0, 0.0, 0.0],
                            [0.0, 0.0, 1.0, 0.0],
                            [0.0, 0.0, 0.0, 1.0]])
    source.transform(trans_init)
    draw_registration_result(source, target, np.identity(4))

    print("2. Downsample with a voxel size 0.05.")
    source_down = voxel_down_sample(source, 0.05)
    target_down = voxel_down_sample(target, 0.05)

    print("3. Estimate normal with search radius 0.1.")
    estimate_normals(source_down, KDTreeSearchParamHybrid(
            radius = 0.1, max_nn = 30))
    estimate_normals(target_down, KDTreeSearchParamHybrid(
            radius = 0.1, max_nn = 30))

    print("4. Compute FPFH feature with search radius 0.25")
    source_fpfh = compute_fpfh_feature(source_down,
            KDTreeSearchParamHybrid(radius = 0.25, max_nn = 100))
    target_fpfh = compute_fpfh_feature(target_down,
            KDTreeSearchParamHybrid(radius = 0.25, max_nn = 100))

    print("5. RANSAC registration on downsampled point clouds.")
    print("   Since the downsampling voxel size is 0.05, we use a liberal")
    print("   distance threshold 0.075.")
    result_ransac = registration_ransac_based_on_feature_matching(
            source_down, target_down, source_fpfh, target_fpfh, 0.075,
            TransformationEstimationPointToPoint(False), 4,
            [CorrespondenceCheckerBasedOnEdgeLength(0.9),
            CorrespondenceCheckerBasedOnDistance(0.075)],
            RANSACConvergenceCriteria(4000000, 500))
    print(result_ransac)
    draw_registration_result(source_down, target_down,
            result_ransac.transformation)

    print("6. Point-to-plane ICP registration is applied on original point")
    print("   clouds to refine the alignment. This time we use a strict")
    print("   distance threshold 0.02.")
    result_icp = registration_icp(source, target, 0.02,
            result_ransac.transformation,
            TransformationEstimationPointToPlane())
    print(result_icp)
    draw_registration_result(source, target, result_icp.transformation)

f:id:robonchu:20180225190834p:plain

Extract geometric feature

Fast Point Feature Histograms
  • An improved version of PFH (Point Feature Histograms): it reduces the number of feature pairings to speed up computation.

Reference:

The FPFH feature is a 33-dimensional vector that describes the local geometric property of a point.

A nearest neighbor query in the 33-dimensional space can return points with similar local geometric structures. See [Rusu2009] for details.
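
To make that concrete, here is a minimal brute-force sketch of such a nearest-neighbor query in feature space, reusing source_fpfh / target_fpfh from the script above. It assumes the Feature object exposes its 33xN matrix as .data (true in later Open3D versions; treat it as an assumption here), and uses brute force purely for illustration.

# Minimal sketch: nearest neighbor in 33-D FPFH feature space (brute force).
# Assumption: source_fpfh.data / target_fpfh.data expose the 33 x N feature matrices.
import numpy as np

src_feat = np.asarray(source_fpfh.data)   # shape (33, N_source)
tgt_feat = np.asarray(target_fpfh.data)   # shape (33, N_target)

query = src_feat[:, 0]                                     # feature of the 0-th source point
dists = np.linalg.norm(tgt_feat - query[:, None], axis=0)  # distance to every target feature
best = int(np.argmin(dists))
print("target point with the most similar local geometry:", best, dists[best])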

RANSAC

  • Short for Random Sample Consensus.

  • A robust estimation method: it fits a model while excluding the influence of outliers.

Reference:

The core function is registration_ransac_based_on_feature_matching.

The most important hyperparameter of this function is RANSACConvergenceCriteria.

It defines the maximum number of RANSAC iterations and the maximum number of validation steps.

The larger these two numbers are, the more accurate the result is, but also the more time the algorithm takes.
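
As a reminder of what RANSAC itself does (independent of Open3D's feature-matching variant), here is a minimal NumPy sketch that robustly fits a 2D line to data with gross outliers: sample a minimal set, hypothesize a model, count inliers, keep the best.

# Minimal RANSAC sketch (plain NumPy, not Open3D): robust 2D line fit y = a*x + b.
import numpy as np

rng = np.random.RandomState(0)
x = np.linspace(0, 10, 100)
y = 2.0 * x + 1.0 + rng.normal(0, 0.1, x.size)   # inliers around y = 2x + 1
y[::10] += rng.uniform(5, 10, y[::10].size)      # add gross outliers

best_model, best_inliers = None, 0
for _ in range(200):                              # number of RANSAC iterations
    i, j = rng.choice(x.size, 2, replace=False)   # minimal sample: 2 points
    if x[i] == x[j]:
        continue
    a = (y[j] - y[i]) / (x[j] - x[i])             # hypothesize a line through them
    b = y[i] - a * x[i]
    inliers = np.sum(np.abs(y - (a * x + b)) < 0.3)  # count points within the threshold
    if inliers > best_inliers:
        best_model, best_inliers = (a, b), inliers

print("estimated line:", best_model, "inliers:", best_inliers)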

f:id:robonchu:20180225190904p:plain

Using this result as the initial alignment and then running ICP registration gives:

f:id:robonchu:20180225191019p:plain

Something like this. Looks good.

Multiway registration

Procedure

  1. Build a pose graph

    • A pose graph has two key elements: nodes and edges.

    • The script creates a pose graph with three nodes and three edges. Among the edges, two of them are odometry edges (uncertain = False) and one is a loop closure edge (uncertain = True).

  2. Optimize a pose graph

  3. Visualize optimization

Multiway registration is the process to align multiple pieces of geometry in a global space.

Open3D implements multiway registration via pose graph optimization. The backend implements the technique presented in [Choi2015].

# src/Python/Tutorial/Advanced/multiway_registration.py

import sys
sys.path.append("../..")
from py3d import *
import numpy as np  # needed for np.identity / np.dot / np.linalg below

if __name__ == "__main__":

    set_verbosity_level(VerbosityLevel.Debug)
    pcds = []
    for i in range(3):
        pcd = read_point_cloud(
                "../../TestData/ICP/cloud_bin_%d.pcd" % i)
        downpcd = voxel_down_sample(pcd, voxel_size = 0.02)
        pcds.append(downpcd)
    draw_geometries(pcds)

    pose_graph = PoseGraph()
    odometry = np.identity(4)
    pose_graph.nodes.append(PoseGraphNode(odometry))

    n_pcds = len(pcds)
    for source_id in range(n_pcds):
        for target_id in range(source_id + 1, n_pcds):
            source = pcds[source_id]
            target = pcds[target_id]

            print("Apply point-to-plane ICP")
            icp_coarse = registration_icp(source, target, 0.3,
                    np.identity(4),
                    TransformationEstimationPointToPlane())
            icp_fine = registration_icp(source, target, 0.03,
                    icp_coarse.transformation,
                    TransformationEstimationPointToPlane())
            transformation_icp = icp_fine.transformation
            information_icp = get_information_matrix_from_point_clouds(
                    source, target, 0.03, icp_fine.transformation)
            print(transformation_icp)

            # draw_registration_result(source, target, np.identity(4))
            print("Build PoseGraph")
            if target_id == source_id + 1: # odometry case
                odometry = np.dot(transformation_icp, odometry)
                pose_graph.nodes.append(
                        PoseGraphNode(np.linalg.inv(odometry)))
                pose_graph.edges.append(
                        PoseGraphEdge(source_id, target_id,
                        transformation_icp, information_icp, uncertain = False))
            else: # loop closure case
                pose_graph.edges.append(
                        PoseGraphEdge(source_id, target_id,
                        transformation_icp, information_icp, uncertain = True))

    print("Optimizing PoseGraph ...")
    option = GlobalOptimizationOption(
            max_correspondence_distance = 0.03,
            edge_prune_threshold = 0.25,
            reference_node = 0)
    global_optimization(pose_graph,
            GlobalOptimizationLevenbergMarquardt(),
            GlobalOptimizationConvergenceCriteria(), option)

    print("Transform points and display")
    for point_id in range(n_pcds):
        print(pose_graph.nodes[point_id].pose)
        pcds[point_id].transform(pose_graph.nodes[point_id].pose)
    draw_geometries(pcds)

Before f:id:robonchu:20180225194151p:plain

After f:id:robonchu:20180225194206p:plain

RGBD integration

Open3D implements a scalable RGBD image integration algorithm.

In order to support large scenes, we use a hierarchical hashing structure introduced in Integrater in ElasticReconstruction.

# src/Python/Tutorial/Advanced/rgbd_integration.py

import sys
sys.path.append("../..")
from py3d import *
from trajectory_io import *
import numpy as np

if __name__ == "__main__":
    camera_poses = read_trajectory("../../TestData/RGBD/odometry.log")  # <- this line is missing in the tutorial listing
    volume = ScalableTSDFVolume(voxel_length = 4.0 / 512.0,
            sdf_trunc = 0.04, with_color = True)

    for i in range(len(camera_poses)):
        print("Integrate {:d}-th image into the volume.".format(i))
        color = read_image("../../TestData/RGBD/color/{:05d}.jpg".format(i))
        depth = read_image("../../TestData/RGBD/depth/{:05d}.png".format(i))
        rgbd = create_rgbd_image_from_color_and_depth(color, depth,
                depth_trunc = 4.0, convert_rgb_to_intensity = False)
        volume.integrate(rgbd, PinholeCameraIntrinsic.prime_sense_default,
                np.linalg.inv(camera_poses[i].pose))

    print("Extract a triangle mesh from the volume and visualize it.")
    mesh = volume.extract_triangle_mesh()
    mesh.compute_vertex_normals()
    draw_geometries([mesh])

Camera Trajectory

TSDF (Truncated Signed Distance Function) volume integration

Each voxel stores the signed distance to the nearest surface, with the surface itself at zero.
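
A minimal 1D sketch of that idea in plain NumPy (not Open3D's implementation): voxels along a ray accumulate a truncated signed distance to the observed surface, averaged over measurements, and the surface is recovered near the zero crossing.

# 1D TSDF sketch: voxels along a ray, fused from several depth observations.
import numpy as np

voxel_size = 0.01
trunc = 0.04                                   # truncation distance (like sdf_trunc)
centers = np.arange(0.0, 2.0, voxel_size)      # voxel centers along the ray [m]
tsdf = np.zeros_like(centers)                  # accumulated signed distance
weight = np.zeros_like(centers)                # accumulated weights

for depth in [1.00, 1.01, 0.99]:               # noisy depth measurements of one surface
    sdf = depth - centers                      # positive in front of the surface, negative behind
    valid = sdf > -trunc                       # ignore voxels far behind the surface
    d = np.clip(sdf, -trunc, trunc)[valid]     # truncate the distance
    tsdf[valid] = (tsdf[valid] * weight[valid] + d) / (weight[valid] + 1)
    weight[valid] += 1

observed = weight > 0
idx = np.argmin(np.abs(tsdf[observed]))        # the surface sits where the TSDF crosses zero
print("surface at approximately", centers[observed][idx], "m")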

Extract a mesh

Mesh extraction uses the marching cubes algorithm [LorensenAndCline1987].

Reference: マーチングキューブ法 - Wikipedia

f:id:robonchu:20180225195949p:plain

Impressions

There are many registration methods, but picking the right one for the task (the environment, and the accuracy vs. speed trade-off) seems to require some know-how and experience.

I want to use them a lot and build up that experience.

Studying Open3D (Part 2)

Goal

I want to freely manipulate data acquired with a depth sensor using Open3D.

f:id:robonchu:20180224200259p:plain

Textbook

Open3D: A Modern Library for 3D Data Processing — Open3D 0.1 dev documentation

RGBD Odometry

[Park2017] : J. Park, Q.-Y. Zhou, and V. Koltun, Colored Point Cloud Registration Revisited, ICCV, 2017. : https://pdfs.semanticscholar.org/e668/cdf876884106948c7151601785023f04ed40.pdf

An implementation of this paper can be invoked like this:

    option = OdometryOption()
    odo_init = np.identity(4)
    print(option)

    [success_hybrid_term, trans_hybrid_term, info] = compute_rgbd_odometry(
            source_rgbd_image, target_rgbd_image,
            pinhole_camera_intrinsic, odo_init,
            RGBDOdometryJacobianFromHybridTerm(), option)

There is another Jacobian implementation as well, but this hybrid one seems to be the recommended choice.

if success_hybrid_term:
    print("Using Hybrid RGB-D Odometry")
    print(trans_hybrid_term)
    source_pcd_hybrid_term = create_point_cloud_from_rgbd_image(
            source_rgbd_image, pinhole_camera_intrinsic)
    source_pcd_hybrid_term.transform(trans_hybrid_term)
    draw_geometries([target_pcd, source_pcd_hybrid_term])

The return value trans_hybrid_term is shown below.

It is a 4x4 homogeneous transformation matrix in 3D.

Using Hybrid RGB-D Odometry
[[  9.99994666e-01  -1.00290715e-03  -3.10826763e-03  -3.75410348e-03]
 [  9.64492959e-04   9.99923448e-01  -1.23356675e-02   2.54977516e-02]
 [  3.12040122e-03   1.23326038e-02   9.99919082e-01   1.88139799e-03]
 [  0.00000000e+00   0.00000000e+00   0.00000000e+00   1.00000000e+00]]
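
As a reminder of what this 4x4 matrix does, here is a small NumPy sketch that applies it to a few 3D points; this is essentially what transform() does to the point coordinates.

# Applying a 4x4 homogeneous transform T to Nx3 points: p' = R p + t.
import numpy as np

T = np.array([[ 9.99994666e-01, -1.00290715e-03, -3.10826763e-03, -3.75410348e-03],
              [ 9.64492959e-04,  9.99923448e-01, -1.23356675e-02,  2.54977516e-02],
              [ 3.12040122e-03,  1.23326038e-02,  9.99919082e-01,  1.88139799e-03],
              [ 0.0,             0.0,             0.0,             1.0]])

points = np.array([[0.0, 0.0, 1.0],
                   [0.1, 0.2, 0.5]])          # some 3D points, one per row

R, t = T[:3, :3], T[:3, 3]                    # rotation part and translation part
transformed = points @ R.T + t                # same as (T @ [p, 1])[:3] for each point
print(transformed)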

Reference:

Checking the documentation of compute_rgbd_odometry:

help(compute_rgbd_odometry)

Result:

Help on built-in function compute_rgbd_odometry in module py3d:

compute_rgbd_odometry(...)
    compute_rgbd_odometry(rgbd_source: py3d.RGBDImage, rgbd_target: py3d.RGBDImage, pinhole_camera_intrinsic: py3d.PinholeCameraIntrinsic=PinholeCameraIntrinsic with width = -1 and height = -1.
    Access intrinsics with intrinsic_matrix., odo_init: numpy.ndarray[float64[4, 4]]=array([[1., 0., 0., 0.],
           [0., 1., 0., 0.],
           [0., 0., 1., 0.],
           [0., 0., 0., 1.]]), jacobian: py3d.RGBDOdometryJacobian=RGBDOdometryJacobianFromHybridTerm, option: py3d.OdometryOption=OdometryOption class.
    minimum_correspondence_ratio = 0.100000
    iteration_number_per_pyramid_level = [ 20, 10, 5, ] 
    max_depth_diff = 0.030000
    min_depth = 0.000000
    max_depth = 4.000000) 

-> Tuple[bool, numpy.ndarray[float64[4, 4]], numpy.ndarray[float64[6, 6]]]
    
    Function to estimate 6D rigid motion from two RGBD image pairs

KD tree

We pick the 1500-th point as the anchor point and paint it red.

# src/Python/Tutorial/Basic/kdtree.py

import sys
import numpy as np
sys.path.append("../..")
from py3d import *

if __name__ == "__main__":

    print("Testing kdtree in py3d ...")
    print("Load a point cloud and paint it gray.")
    pcd = read_point_cloud("../../TestData/Feature/cloud_bin_0.pcd")
    pcd.paint_uniform_color([0.5, 0.5, 0.5])
    pcd_tree = KDTreeFlann(pcd)

    print("Paint the 1500th point red.")
    pcd.colors[1500] = [1, 0, 0]

    print("Find its 200 nearest neighbors, paint blue.")
    [k, idx, _] = pcd_tree.search_knn_vector_3d(pcd.points[1500], 200)
    np.asarray(pcd.colors)[idx[1:], :] = [0, 0, 1]

    print("Find its neighbors with distance less than 0.2, paint green.")
    [k, idx, _] = pcd_tree.search_radius_vector_3d(pcd.points[1500], 0.2)
    np.asarray(pcd.colors)[idx[1:], :] = [0, 1, 0]

    print("Visualize the point cloud.")
    draw_geometries([pcd])
    print("")

Searching for neighboring points.

f:id:robonchu:20180225112642p:plain

Iterative Closest Point

ICP: a method for aligning two point clouds.

A good initial alignment seems to be important for the fitting to work well.

And a good initial alignment can apparently be obtained with a global registration algorithm.

The initial alignment is usually obtained by a global registration algorithm.

See Global registration for examples(http://www.open3d.org/docs/tutorial/Advanced/global_registration.html#global-registration).

Reference:

The Open3D tutorial uses both "point-to-point ICP" and "point-to-plane ICP".

The input are two point clouds and an initial transformation that roughly aligns the source point cloud to the target point cloud.

The output is a refined transformation that tightly aligns the two point clouds

# src/Python/Tutorial/Basic/icp_registration.py

import sys
import copy
import numpy as np
sys.path.append("../..")
from py3d import *

def draw_registration_result(source, target, transformation):
    source_temp = copy.deepcopy(source)
    target_temp = copy.deepcopy(target)
    source_temp.paint_uniform_color([1, 0.706, 0])
    target_temp.paint_uniform_color([0, 0.651, 0.929])
    source_temp.transform(transformation)
    draw_geometries([source_temp, target_temp])

if __name__ == "__main__":
    source = read_point_cloud("../../TestData/ICP/cloud_bin_0.pcd")
    target = read_point_cloud("../../TestData/ICP/cloud_bin_1.pcd")
    threshold = 0.02
    trans_init = np.asarray(
                [[0.862, 0.011, -0.507,  0.5],
                [-0.139, 0.967, -0.215,  0.7],
                [0.487, 0.255,  0.835, -1.4],
                [0.0, 0.0, 0.0, 1.0]])
    draw_registration_result(source, target, trans_init)
    print("Initial alignment")
    evaluation = evaluate_registration(source, target,
            threshold, trans_init)
    print(evaluation)

    print("Apply point-to-point ICP")
    reg_p2p = registration_icp(source, target, threshold, trans_init,
            TransformationEstimationPointToPoint())
    print(reg_p2p)
    print("Transformation is:")
    print(reg_p2p.transformation)
    print("")
    draw_registration_result(source, target, reg_p2p.transformation)

    print("Apply point-to-plane ICP")
    reg_p2l = registration_icp(source, target, threshold, trans_init,
            TransformationEstimationPointToPlane())
    print(reg_p2l)
    print("Transformation is:")
    print(reg_p2l.transformation)
    print("")
    draw_registration_result(source, target, reg_p2l.transformation)

f:id:robonchu:20180225115139p:plain

Point to Point ICP

In general, the ICP algorithm iterates over two steps:

・ Find the correspondence set K = {(p, q)} from the target point cloud P and the source point cloud Q transformed with the current transformation matrix T.

・ Update the transformation T by minimizing an objective function E(T) defined over the correspondence set K.

f:id:robonchu:20180225115231p:plain

Increasing the number of iterations improves the fitness (match rate).
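
To make the two steps above concrete, here is a minimal NumPy sketch of one point-to-point ICP iteration (nearest-neighbor correspondences, then the closed-form SVD update of T); it is an illustration only, not Open3D's implementation.

# One point-to-point ICP iteration in plain NumPy (illustrative only).
import numpy as np

def icp_step(source, target, T):
    """source (N,3), target (M,3) point arrays; T: current 4x4 transform."""
    # 1. transform source with current T and find nearest-neighbor correspondences
    src = source @ T[:3, :3].T + T[:3, 3]
    d = np.linalg.norm(src[:, None, :] - target[None, :, :], axis=2)  # brute-force distances
    corr = target[np.argmin(d, axis=1)]

    # 2. minimize E(T) = sum ||q - (R p + t)||^2 in closed form (SVD / Kabsch)
    mu_s, mu_t = src.mean(axis=0), corr.mean(axis=0)
    H = (src - mu_s).T @ (corr - mu_t)
    U, _, Vt = np.linalg.svd(H)
    R = Vt.T @ U.T
    if np.linalg.det(R) < 0:             # guard against a reflection
        Vt[-1, :] *= -1
        R = Vt.T @ U.T
    t = mu_t - R @ mu_s

    dT = np.eye(4)
    dT[:3, :3], dT[:3, 3] = R, t
    return dT @ T                         # updated transform

# usage sketch (src_pts / tgt_pts are hypothetical (N,3) arrays):
#   T = np.eye(4)
#   for _ in range(30):
#       T = icp_step(src_pts, tgt_pts, T)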

Point to Plane ICP

It appears to converge faster than point-to-point ICP.

[Rusinkiewicz2001] has shown that the point-to-plane ICP algorithm has a faster convergence speed than the point-to-point ICP algorithm.

f:id:robonchu:20180225115312p:plain

Working with numpy

numpy array -> Open3D type

Open3D type -> numpy array

Both conversions are easy.

# src/Python/Tutorial/Basic/working_with_numpy.py

import sys, copy
import numpy as np
sys.path.append("../..")
from py3d import *

if __name__ == "__main__":

    # generate some neat n times 3 matrix using a variant of sinc function
    x = np.linspace(-3, 3, 401)
    mesh_x, mesh_y = np.meshgrid(x,x)
    z = np.sinc((np.power(mesh_x,2)+np.power(mesh_y,2)))
    xyz = np.zeros((np.size(mesh_x),3))
    xyz[:,0] = np.reshape(mesh_x,-1)
    xyz[:,1] = np.reshape(mesh_y,-1)
    xyz[:,2] = np.reshape(z,-1)
    print('xyz')
    print(xyz)

    # Pass xyz to Open3D.PointCloud and visualize
    pcd = PointCloud()
    pcd.points = Vector3dVector(xyz)
    write_point_cloud("../../TestData/sync.ply", pcd)

    # Load saved point cloud and transform it into NumPy array
    pcd_load = read_point_cloud("../../TestData/sync.ply")
    xyz_load = np.asarray(pcd_load.points)
    print('xyz_load')
    print(xyz_load)

    # visualization
    draw_geometries([pcd_load])

Output of print:

xyz
[[-3.00000000e+00 -3.00000000e+00 -3.89817183e-17]
 [-2.98500000e+00 -3.00000000e+00 -4.94631078e-03]
 [-2.97000000e+00 -3.00000000e+00 -9.52804798e-03]
 ...
 [ 2.97000000e+00  3.00000000e+00 -9.52804798e-03]
 [ 2.98500000e+00  3.00000000e+00 -4.94631078e-03]
 [ 3.00000000e+00  3.00000000e+00 -3.89817183e-17]]
Writing PLY: [========================================] 100%
Reading PLY: [========================================] 100%
xyz_load
[[-3.00000000e+00 -3.00000000e+00 -3.89817183e-17]
 [-2.98500000e+00 -3.00000000e+00 -4.94631078e-03]
 [-2.97000000e+00 -3.00000000e+00 -9.52804798e-03]
 ...
 [ 2.97000000e+00  3.00000000e+00 -9.52804798e-03]
 [ 2.98500000e+00  3.00000000e+00 -4.94631078e-03]
 [ 3.00000000e+00  3.00000000e+00 -3.89817183e-17]]

f:id:robonchu:20180225120527p:plain

Hmm, this is really easy to understand!

Studying Open3D (Part 1)

Goal

I want to freely manipulate data acquired with a depth sensor using Open3D.

Open3D – A Modern Library for 3D Data Processing

f:id:robonchu:20180224200259p:plain

Open3D is seriously cool!

Thank you so much, Intel!!

Textbook

Open3D: A Modern Library for 3D Data Processing — Open3D 0.9.0 documentation

Installation

In the cmake command below, the Python executable is specified explicitly so that the Python version used for the bindings matches the system one.

scripts/install-deps-ubuntu.sh
mkdir build
cd build
cmake -DPYTHON_EXECUTABLE:FILEPATH=/usr/bin/python ../src
make -j

fatal error: Python.h: No such file or directory

If this error appears, see:

make test works, but unable to run tests individually · Issue #234 · pybind/pybind11 · GitHub

Testing the py3d library

  • $Open3D/build/lib/Tutorial/Basic$ python rgbd_redwood.py

f:id:robonchu:20180224164158p:plain

If this image appears, everything is OK.

Getting Started

Redwood dataset

Important ↓

The Redwood format stored depth in a 16-bit single channel image.

The integer value represents the depth measurement in millimeters. It is the default format for Open3D to parse depth images.
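
A quick NumPy sketch of what that format means, using a toy depth array (hypothetical values): the raw 16-bit integers are millimeters, so dividing by 1000 gives the float depth in meters that the RGBDImage below stores.

# Redwood depth: 16-bit unsigned integers in millimeters -> float meters.
import numpy as np

depth_raw_mm = np.array([[0, 500, 1000],
                         [1500, 2000, 65535]], dtype=np.uint16)  # toy 16-bit depth image

depth_m = depth_raw_mm.astype(np.float32) / 1000.0               # millimeters -> meters
print(depth_m)   # 0 means "no measurement"; 65535 mm = 65.535 m (usually truncated later)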

# src/Python/Tutorial/Basic/rgbd_redwood.py

import sys
sys.path.append("../..")

#conda install pillow matplotlib
from py3d import *
import matplotlib.pyplot as plt


if __name__ == "__main__":
    print("Read Redwood dataset")
    color_raw = read_image("../../TestData/RGBD/color/00000.jpg")
    depth_raw = read_image("../../TestData/RGBD/depth/00000.png")
    rgbd_image = create_rgbd_image_from_color_and_depth(
        color_raw, depth_raw);
    print(rgbd_image)
    plt.subplot(1, 2, 1)
    plt.title('Redwood grayscale image')
    plt.imshow(rgbd_image.color)
    plt.subplot(1, 2, 2)
    plt.title('Redwood depth image')
    plt.imshow(rgbd_image.depth)
    plt.show()
    pcd = create_point_cloud_from_rgbd_image(rgbd_image,
            PinholeCameraIntrinsic.prime_sense_default)
    # Flip it, otherwise the pointcloud will be upside down
    pcd.transform([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]])
    draw_geometries([pcd])

The default conversion function create_rgbd_image_from_color_and_depth creates an RGBDImage from a pair of color and depth image.

The color image is converted into a grayscale image, stored in float ranged in [0, 1].

The depth image is stored in float, representing the depth value in meters. print(rgbd_image) yields:

RGBDImage of size
Color image : 640x480, with 1 channels.
Depth image : 640x480, with 1 channels.
Use numpy.asarray to access buffer data.

Here we use PinholeCameraIntrinsic.prime_sense_default as default camera parameter.
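
As the printout says, the underlying buffers can be accessed with numpy.asarray; a tiny sketch, assuming rgbd_image from the script above:

# Accessing the RGBDImage buffers as NumPy arrays (rgbd_image from the script above).
import numpy as np

depth = np.asarray(rgbd_image.depth)   # float depth in meters, shape (480, 640)
gray = np.asarray(rgbd_image.color)    # grayscale intensity in [0, 1]
print(depth.shape, float(depth.max()), float(gray.max()))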

Datasets

For the well-known datasets

  • Redwood dataset
  • SUN dataset
  • NYU dataset
  • TUM dataset

the tutorial gives careful explanations of how to load each of them.

This is truly wonderful.

Basic

python binding

def example_import_function():
    from py3d import read_point_cloud
    pcd = read_point_cloud("../../TestData/ICP/cloud_bin_0.pcd")
    print(pcd)

-> PointCloud with 198835 points.

As shown below, you can view documentation for classes and functions with help().

def example_help_function():
    import py3d as py3d
    help(py3d)
    help(py3d.PointCloud)
    help(py3d.read_point_cloud)

Point Cloud

# src/Python/Tutorial/Basic/pointcloud.py

import sys
import numpy as np
sys.path.append("../..")
from py3d import *

if __name__ == "__main__":

    print("Load a ply point cloud, print it, and render it")
    pcd = read_point_cloud("../../TestData/fragment.ply")
    print(pcd)
    print(np.asarray(pcd.points))
    draw_geometries([pcd])

    print("Downsample the point cloud with a voxel of 0.05")
    downpcd = voxel_down_sample(pcd, voxel_size = 0.05)
    draw_geometries([downpcd])

    print("Recompute the normal of the downsampled point cloud")
    estimate_normals(downpcd, search_param = KDTreeSearchParamHybrid(
            radius = 0.1, max_nn = 30))
    draw_geometries([downpcd])
    print("")

    print("Load a polygon volume and use it to crop the original point cloud")
    vol = read_selection_polygon_volume("../../TestData/Crop/cropped.json")
    chair = vol.crop_point_cloud(pcd)
    draw_geometries([chair])
    print("")

    print("Paint chair")
    chair.paint_uniform_color([1, 0.706, 0])
    draw_geometries([chair])
    print("")

Loading

PCD files and other formats can be read with a single line.

read_point_cloud reads a point cloud from a file. It tries to decode the file based on the extension name.

The supported extension names are: pcd, ply, xyz, xyzrgb, xyzn, pts.

Visualization

Of course, visualization is also a single line lol

draw_geometries visualizes the point cloud. Use mouse/trackpad to see the geometry from different view point.

f:id:robonchu:20180224194335p:plain

Controls

-- Mouse view control --
  Left button + drag        : Rotate.
  Ctrl + left button + drag : Translate.
  Wheel                     : Zoom in/out.

-- Keyboard view control --
  [/]          : Increase/decrease field of view.
  R            : Reset view point.
  Ctrl/Cmd + C : Copy current view status into the clipboard.
  Ctrl/Cmd + V : Paste view status from clipboard.

-- General control --
  Q, Esc       : Exit window.
  H            : Print help message.
  P, PrtScn    : Take a screen capture.
  D            : Take a depth capture.
  O            : Take a capture of current rendering settings.
:

It seems you can create animations as well.

In addition to draw_geometries, Open3D has a set of sibling functions with more advanced functionality. draw_geometries_with_custom_animation allows the programmer to define a custom view trajectory and play an animation in the GUI. draw_geometries_with_animation_callback and draw_geometries_with_key_callback accept Python callback functions as input. The callback function is called in an automatic animation loop, or upon a key press event. See Customized visualization for details.

Customized visualization — Open3D 0.9.0 documentation

Downsampling

Unbelievably, downsampling is also a single line (wow)

print("Downsample the point cloud with a voxel of 0.05")
downpcd = voxel_down_sample(pcd, voxel_size = 0.05)
draw_geometries([downpcd])

f:id:robonchu:20180224194817p:plain

Vertex normal estimation
print("Recompute the normal of the downsampled point cloud")
estimate_normals(downpcd, search_param = KDTreeSearchParamHybrid(
        radius = 0.1, max_nn = 30))
draw_geometries([downpcd])
print("")

f:id:robonchu:20180224200806p:plain

In the viewer, the normals can be toggled and resized with the "n", "+", and "-" keys.

estimate_normals computes normal for every point. The function finds adjacent points and calculate the principal axis of the adjacent points using covariance analysis.

The function takes an instance of KDTreeSearchParamHybrid class as an argument. The two key arguments radius = 0.1 and max_nn = 30 specifies search radius and maximum nearest neighbor. It has 10cm of search radius, and only considers up to 30 neighbors to save computation time.
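
What the covariance analysis means, as a small NumPy sketch (illustrative, not Open3D's code): the normal of a point is the eigenvector of its neighborhood's covariance matrix with the smallest eigenvalue (the sign remains ambiguous).

# Estimating one point's normal from its neighbors via covariance analysis.
import numpy as np

neighbors = np.array([[0.00, 0.00,  0.001],
                      [0.05, 0.01, -0.002],
                      [0.01, 0.06,  0.000],
                      [0.06, 0.05,  0.003],
                      [0.03, 0.02, -0.001]])   # toy neighborhood, roughly in the z = 0 plane

centered = neighbors - neighbors.mean(axis=0)
cov = centered.T @ centered / len(neighbors)    # 3x3 covariance matrix
eigvals, eigvecs = np.linalg.eigh(cov)          # eigenvalues in ascending order
normal = eigvecs[:, 0]                          # eigenvector of the smallest eigenvalue
print(normal)                                   # close to (0, 0, ±1)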

KDtree: kd木 - Wikipedia

Crop point cloud
print("We load a polygon volume and use it to crop the original point cloud")
vol = read_selection_polygon_volume("../../TestData/Crop/cropped.json")
chair = vol.crop_point_cloud(pcd)
draw_geometries([chair])
print("")

f:id:robonchu:20180224200045p:plain

Paint Point Cloud
print("Paint chair")
chair.paint_uniform_color([1, 0.706, 0])
draw_geometries([chair])
print("")

f:id:robonchu:20180224200056p:plain

I want to keep learning the usage little by little until I can handle point clouds freely~

Playing with ROS x a video camera

Goal

Try using a USB camera with ROS.

Textbook

ROSプログラミング | 森北出版株式会社

License

  <maintainer email="fei@kanto-gakuin.ac.jp">Fei Qian</maintainer>
  <license>BSD</license>
  <author>Fei Qian</author>

Environment setup

  1. sudo apt-get install ros-kinetic-usb-cam

  2. sudo apt-get install ros-kinetic-image-pipeline

Configuration

Details on usb_cam: usb_cam - ROS Wiki

pixel_format

About RGB vs. YUV data: in YUV, Y is luminance, U is the difference between luminance and blue, and V is the difference between luminance and red.

  1. http://www.eizo.co.jp/support/db/files/technical_information/2010/WP10-009.pdf

👆 Very easy to understand
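
As a concrete sketch of that definition, here is an RGB-to-YUV conversion using the classic BT.601-style coefficients; the exact constants differ between standards, so treat the numbers as illustrative.

# RGB -> YUV (BT.601-style): Y is luma, U ~ (B - Y), V ~ (R - Y).
import numpy as np

def rgb_to_yuv(rgb):
    r, g, b = rgb
    y = 0.299 * r + 0.587 * g + 0.114 * b   # luminance
    u = 0.492 * (b - y)                     # blue difference
    v = 0.877 * (r - y)                     # red difference
    return np.array([y, u, v])

print(rgb_to_yuv(np.array([1.0, 0.0, 0.0])))   # pure red
print(rgb_to_yuv(np.array([0.5, 0.5, 0.5])))   # gray: U and V are zero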

About codecs:

  1. 動画コーデックの種類と違い(H.264・VP9・MPEG・Xvid・DivX・WMV等)【比較】 | AviUtlの易しい使い方

  2. 色空間:RGBとYUVの違い - ぼくんちのTV 別館

Setting the format, receiving, and checking the stream

$ rosparam set /usb_cam/pixel_format mjpeg

$ rosrun usb_cam usb_cam_node &

$ rosrun image_view image_view image:=/usb_cam/image_raw

Besides this, you can also

run $ rqt and view the image from its Visualization plugins,

or view it with $ rqt_image_view

<launch>
  <node name="usb_cam" pkg="usb_cam" type="usb_cam_node" output="screen" >
    <param name="video_device" value="/dev/video0" />
    <param name="camera_frame_id" value="usb_cam" />
  </node>
  <node name="image_view" pkg="image_view" type="image_view" respawn="false" output="screen" required="true">
    <remap from="image" to="/usb_cam/image_raw"/>
    <param name="autosize" value="true" />
  </node>
</launch>

[ WARN] [1519217872.252076062]: Camera calibration file /home/takuya-usb/.ros/camera_info/head_camera.yaml not found.

Camera calibration

f:id:robonchu:20180126150406p:plain

Reference: カメラ キャリブレーションとは - MATLAB & Simulink - MathWorks 日本

f:id:robonchu:20180221221301p:plain

  1. Download a checkerboard

  2. Calibration

    • rosrun usb_cam usb_cam_node

    • rosrun camera_calibration cameracalibrator.py --size 8x6 --square 0.026 image:=/usb_cam/image_raw

  3. Extract the calibration data

    1. cd /tmp

    2. tar zxvf calibrationdata.tar.gz

    3. cp ost.txt ost.ini

    4. rosrun camera_calibration_parsers convert ost.ini head_camera.yaml

    5. Change camera_name in head_camera.yaml to head_camera

    6. Copy head_camera.yaml to ~/.ros/camera_info/

Publishing and subscribing to a video stream

Reference: image_transport - ROS Wiki

image_transport

Using image_transport, images can be compressed before being sent, which eases the bandwidth pressure of transferring a video stream.

  • Python does not support image_transport, so use republish as follows.

    • $ republish [in_transport] in:=<in_base_topic> [out_transport] out:=<out_base_topic>
  • In C++, it is available via #include <image_transport/image_transport.h>

Reference: http://mprg-robot.isc.chubu.ac.jp/image_transport/

Publishing topics with image_transport

It provides the following two methods:

  1. ImageTransport's advertise

  2. ImageTransport's advertiseCamera

  • Unlike a plain single-topic publisher, an image_transport publisher publishes several independent topics at the same time.

  • When publishing on the separate compressed topic, the topic name becomes <base topic name>/compressed.

  • Auxiliary transports such as compressed are called subtopics.

$ rosmsg show sensor_msgs/Image

std_msgs/Header header
  uint32 seq
  time stamp
  string frame_id
uint32 height
uint32 width
string encoding
uint8 is_bigendian
uint32 step
uint8[] data

Plugins provided by image_transport
  1. image_transport ("raw") - The default transport, sending sensor_msgs/Image through ROS.

  2. compressed_image_transport ("compressed") - JPEG or PNG image compression.

  3. theora_image_transport ("theora") - Streaming video using the Theora codec.

  • With CameraPublisher, in addition to the topics above, base_topic/camera_info is also available.

  • Theora: an open, lossy video compression format and its codec.

  • PNG: Portable Network Graphics

  • JPEG: Joint Photographic Experts Group

References:

Theoraコーデックを使ってみた① [インストール] - It’s now or never

JPG・PNG・GIFの違いをまとめてみました。 | ギズモード・ジャパン

How to use image_transport

image_transport/Tutorials/WritingNewTransport - ROS Wiki

When subscribing, use the base topic name.

The subtopic (transport) is selected with a parameter, as follows:

  • rosparam set base_topic/image_transport [raw | compressed | theora]

About key frames: キーフレームとは?Iフレーム・Pフレーム・Bフレームの違い【GOP】 | AviUtlの易しい使い方

Reference:

camera_calibration - ROS Wiki

cv_bridge

ROS provides cv_bridge, an interface that makes it easy to use OpenCV.

  • ROS stream type: sensor_msgs/Image

  • OpenCV type: Intel image processing library

f:id:robonchu:20180126154843p:plain

  • It can be used via #include <cv_bridge/cv_bridge.h>.

Image subscriber program

#include <ros/ros.h>
#include <image_transport/image_transport.h>
#include <cv_bridge/cv_bridge.h>
#include <opencv2/core.hpp>
#include <opencv2/highgui.hpp>

//#include <opencv2/highgui/highgui.hpp>


void imageCallback(const sensor_msgs::ImageConstPtr& msg)
{
    try {
        cv::imshow("Image_Subscriber", cv_bridge::toCvShare(msg, "bgr8")->image);
        cv::waitKey(30);
    }
    catch (cv_bridge::Exception& e) {
        ROS_ERROR("Could not convert from '%s' to 'bgr8'.", msg->encoding.c_str());
    }
}

int main(int argc, char **argv)
{
    ros::init(argc, argv, "image_subscriber");
    ros::NodeHandle nh;
    cv::namedWindow("Image_Subscriber");
    cv::startWindowThread();

    image_transport::ImageTransport it(nh);
    image_transport::Subscriber sub = it.subscribe("camera_raw", 1, imageCallback);

    ros::spin();
    cv::destroyWindow("Image_Subscriber");

    return EXIT_SUCCESS;
}

CMake

Reference: ja/catkin/CMakeLists.txt - ROS Wiki

cmake_minimum_required(VERSION 2.8.3)
project(my_camera)

find_package(catkin REQUIRED COMPONENTS
  roscpp
  std_msgs
  cv_bridge
  image_transport
)

catkin_package(
  CATKIN_DEPENDS roscpp std_msgs cv_bridge image_transport
)

find_package(OpenCV)

include_directories(
  include
  ${catkin_INCLUDE_DIRS}
  ${OpenCV_INCLUDE_DIRS}
)

add_executable(image_sub src/image_subscriber.cpp)
target_link_libraries(image_sub
  ${catkin_LIBRARIES}
  ${OpenCV_LIBRARIES}
)


package.xml

  <buildtool_depend>catkin</buildtool_depend>

  <build_depend>roscpp</build_depend>
  <build_depend>std_msgs</build_depend>
  <build_depend>image_transport</build_depend>
  <build_depend>cv_bridge</build_depend>

  <run_depend>roscpp</run_depend>
  <run_depend>std_msgs</run_depend>
  <run_depend>image_transport</run_depend>
  <run_depend>cv_bridge</run_depend>

Image publisher program

Check the camera's parameters with the following command:

  • $ v4l2-ctl -d /dev/video0 --list-formats-ext

#include <ros/ros.h>
#include <image_transport/image_transport.h>
#include <cv_bridge/cv_bridge.h>
#include <sensor_msgs/image_encodings.h>
#include <opencv2/highgui/highgui.hpp>

int main( int argc, char **argv )
{
    ros::init( argc, argv, "image_publisher" );
    ros::NodeHandle nh;

    // Open camera with CAMERA_INDEX (webcam is typically #0).
    const int CAMERA_INDEX = 0;
    cv::VideoCapture capture( CAMERA_INDEX );
    if( not capture.isOpened() ) {
        ROS_INFO_STREAM("Failed to open camera with index " << CAMERA_INDEX << "!");
        ros::shutdown();
    }
    // Set the parameters using the results of executing the following command:
    //   v4l2-ctl -d /dev/video0 --list-formats-ext
    capture.set(CV_CAP_PROP_FRAME_WIDTH, 640);
    capture.set(CV_CAP_PROP_FRAME_HEIGHT, 480);
    capture.set(CV_CAP_PROP_FPS, 30);
    //double width = capture.get(CV_CAP_PROP_FRAME_WIDTH);
    //double height = capture.get(CV_CAP_PROP_FRAME_HEIGHT);
    //ROS_DEBUG_STREAM("width: " << width << ", height: " << height);

    image_transport::ImageTransport it(nh);
    image_transport::Publisher pub_image = it.advertise( "camera_raw", 1 );

    // for debug
    cv::namedWindow("Image_Publisher", CV_WINDOW_AUTOSIZE | CV_WINDOW_FREERATIO);

    cv_bridge::CvImagePtr frame = boost::make_shared< cv_bridge::CvImage >();
    frame->encoding = sensor_msgs::image_encodings::BGR8;
    while( ros::ok() ) {
        // for debug
        cv::Mat image;
        capture >> image;
        cv::imshow("Image_Publisher", image);
        //

        capture >> frame->image;
        if( frame->image.empty() ) {
            ROS_ERROR_STREAM( "Failed to capture frame!" );
            ros::shutdown();
        }
        frame->header.stamp = ros::Time::now();
        pub_image.publish( frame->toImageMsg() );

        if(cv::waitKey(30) >= 0) break;
        ros::spinOnce();
    }

    capture.release();
    return EXIT_SUCCESS;
}

launch

<launch>
  <param name="pixel_format" value="mjpeg" />
  <node name="image_publisher"  pkg="my_camera" type="image_pub"  output="screen"/>
  <node name="image_subscriber" pkg="my_camera" type="image_sub" output="screen"/>
</launch>

f:id:robonchu:20180221230211p:plain

$ rostopic list

/camera_raw
/camera_raw/compressed
/camera_raw/compressed/parameter_descriptions
/camera_raw/compressed/parameter_updates
/camera_raw/compressedDepth
/camera_raw/compressedDepth/parameter_descriptions
/camera_raw/compressedDepth/parameter_updates
/camera_raw/theora
/camera_raw/theora/parameter_descriptions
/camera_raw/theora/parameter_updates
/rosout
/rosout_agg

Camera image publisher program

#include <ros/ros.h>
#include <image_transport/image_transport.h>
#include <cv_bridge/cv_bridge.h>
#include <sensor_msgs/image_encodings.h>
#include <opencv2/highgui/highgui.hpp>

int main( int argc, char **argv )
{
    ros::init( argc, argv, "image_Camerapublisher" );
    ros::NodeHandle n;

    // Open camera with CAMERA_INDEX (webcam is typically #0).
    const int CAMERA_INDEX = 0;
    cv::VideoCapture capture( CAMERA_INDEX );
    if( not capture.isOpened() ) {
        ROS_INFO_STREAM("Failed to open camera with index " << CAMERA_INDEX << "!");
        ros::shutdown();
    }

    // Set the parameters using the results of executing the following command:
    //   v4l2-ctl -d /dev/video0 --list-formats-ext
    capture.set(CV_CAP_PROP_FRAME_WIDTH, 640);
    capture.set(CV_CAP_PROP_FRAME_HEIGHT, 480);
    capture.set(CV_CAP_PROP_FPS, 30);

    image_transport::ImageTransport it( n );
    image_transport::CameraPublisher pub_image = it.advertiseCamera( "camera_raw", 1 );
    sensor_msgs::CameraInfo cam_info;

    // for debug
    //cv::namedWindow("Image_CameraPublisher", CV_WINDOW_AUTOSIZE | CV_WINDOW_FREERATIO);

    cv_bridge::CvImagePtr frame = boost::make_shared< cv_bridge::CvImage >();
    frame->encoding = sensor_msgs::image_encodings::BGR8;
    while( ros::ok() ) {
        // for debug
        //cv::Mat image;
        //capture >> image;
        //cv::imshow("Image_CameraPublisher", image);
        //

        capture >> frame->image;
        if( frame->image.empty() ) {
            ROS_ERROR_STREAM( "Failed to capture frame!" );
            ros::shutdown();
        }
        frame->header.stamp = ros::Time::now();
        cam_info.header.stamp = frame->header.stamp;
        pub_image.publish( frame->toImageMsg() , 
            sensor_msgs::CameraInfoConstPtr(new sensor_msgs::CameraInfo(cam_info)));

        if(cv::waitKey(30) >= 0) break;
        ros::spinOnce();
    }

    capture.release();
    return EXIT_SUCCESS;
}

Camera image subscriber program

#include <ros/ros.h>
#include <image_transport/image_transport.h>
#include <opencv2/highgui/highgui.hpp>
#include <cv_bridge/cv_bridge.h>

void imageCallback(const sensor_msgs::ImageConstPtr& msg, const sensor_msgs::CameraInfoConstPtr& info)
{
    try {
        cv::imshow("image_CameraSubscriber", cv_bridge::toCvShare(msg, "bgr8")->image);
        if(cv::waitKey(30) >= 0) 
            ros::shutdown();
    }
    catch (cv_bridge::Exception& e) {
        ROS_ERROR("Could not convert from '%s' to 'bgr8'.", msg->encoding.c_str());
    }
}

int main(int argc, char **argv)
{
    ros::init(argc, argv, "image_CameraSubscriber");
    ros::NodeHandle nh;
    cv::namedWindow("image_CameraSubscriber");
    cv::startWindowThread();

    image_transport::ImageTransport it(nh);
    image_transport::CameraSubscriber sub;

    sub = it.subscribeCamera("camera_raw", 1, imageCallback);

    ros::spin();
    cv::destroyWindow("image_CameraSubscriber");

    return EXIT_SUCCESS;
}

launch

<launch>
  <node name="camera_publisher"  pkg="my_camera" type="camera_pub"  output="screen"/>
  <node name="camera_subscriber" pkg="my_camera" type="camera_sub" 
    required="true" output="screen"/>
</launch>

f:id:robonchu:20180221231418p:plain

$ rostopic list

/camera_info
/camera_raw
/camera_raw/compressed
/camera_raw/compressed/parameter_descriptions
/camera_raw/compressed/parameter_updates
/camera_raw/compressedDepth
/camera_raw/compressedDepth/parameter_descriptions
/camera_raw/compressedDepth/parameter_updates
/camera_raw/theora
/camera_raw/theora/parameter_descriptions
/camera_raw/theora/parameter_updates
/rosout
/rosout_agg

Playing with ROS x JoyStick

Goal

Try operating a joystick with ROS.

buffalo.jp

Textbook

ROSプログラミング | 森北出版株式会社

License

  <maintainer email="fei@kanto-gakuin.ac.jp">Fei Qian</maintainer>
  <license>BSD</license>
  <author>Fei Qian</author>

Setup procedure

Check the device

  • ls /dev/input/js*

Check the joystick inputs

  • jstest /dev/input/js0

Install the package

Change the input device

  • rosparam set joy_node/dev "/dev/input/jsX"  <- X: device number

Run procedure

  • rosrun joy joy_node

  • rostopic list

/diagnostics
/joy
/rosout
/rosout_agg
  • rostopic info /joy
Type: sensor_msgs/Joy

Publishers: 
 * /joy_node (http://172.17.0.1:34890/)

Subscribers: None
  • rosmsg show sensor_msgs/Joy
std_msgs/Header header
  uint32 seq
  time stamp
  string frame_id
float32[] axes
int32[] buttons
  • rostopic echo joy
header: 
  seq: 1
  stamp: 
    secs: 1516022983
    nsecs: 706817308
  frame_id: ''
axes: [0.0, 1.0]
buttons: [0, 0, 0, 0, 0, 0, 0, 0]
---
header: 
  seq: 2

Writing the program

Try controlling the turtlesim turtle with the joystick.

  • rosrun turtlesim turtlesim_node &

  • rosnode info /turtlesim

Node [/turtlesim]
Publications: 
 * /rosout [rosgraph_msgs/Log]
 * /turtle1/color_sensor [turtlesim/Color]
 * /turtle1/pose [turtlesim/Pose]

Subscriptions: 
 * /turtle1/cmd_vel [unknown type]

Services: 
 * /clear
 * /kill
 * /reset
 * /spawn
 * /turtle1/set_pen
 * /turtle1/teleport_absolute
 * /turtle1/teleport_relative
 * /turtlesim/get_loggers
 * /turtlesim/set_logger_level


contacting node http://172.17.0.1:38981/ ...
Pid: 6614
Connections:
 * topic: /rosout
    * to: /rosout
    * direction: outbound
    * transport: TCPROS
  • rostopic type /turtle1/cmd_vel
geometry_msgs/Twist
  • rosmsg show -r geometry_msgs/Twist
# This expresses velocity in free space broken into its linear and angular parts.
Vector3  linear
Vector3  angular
  • rosmsg show geometry_msgs/Twist
geometry_msgs/Vector3 linear
  float64 x
  float64 y
  float64 z
geometry_msgs/Vector3 angular
  float64 x
  float64 y
  float64 z

Implementation

Folder structure

├── CMakeLists.txt
└── my_joystick
    ├── CMakeLists.txt
    ├── launch
    │   └── my_joystick.launch
    ├── package.xml
    └── src
        └── my_joystick.cpp

my_joystick.cpp

f:id:robonchu:20180221211513p:plain

The A button makes it move straight faster.

The B button makes it turn faster.

#include <ros/ros.h>
#include <geometry_msgs/Twist.h>
#include <sensor_msgs/Joy.h>

// class definition
class TeleopTurtle
{
  public:
        //constructor
    TeleopTurtle();

  // hidden (private) members
  private:
    void joyCallback(const sensor_msgs::Joy::ConstPtr& joy);
    ros::NodeHandle nh;
    int vel_linear, vel_angular;
    int lin_boost, ang_boost;
    double l_scale_, a_scale_;
    ros::Publisher  vel_pub_;
    ros::Subscriber joy_sub_;
};

//constructor
TeleopTurtle::TeleopTurtle(): vel_linear(1), vel_angular(0), a_scale_(3.2),l_scale_(2.0), lin_boost(0), ang_boost(1) //initialize
{
    nh.param("axis_linear"  , vel_linear , vel_linear); //set parameter
    nh.param("axis_angular" , vel_angular, vel_angular);
    nh.param("angular_boost" , ang_boost, ang_boost);
    nh.param("linear_boost" , lin_boost, lin_boost);
    nh.param("scale_angular", a_scale_, a_scale_);
    nh.param("scale_linear" , l_scale_, l_scale_);

    // subscriber: uses the templated subscribe
    // 'this' is the pointer to the current object
    joy_sub_ = nh.subscribe<sensor_msgs::Joy>("joy", 10, &TeleopTurtle::joyCallback, this);
    vel_pub_ = nh.advertise<geometry_msgs::Twist>("turtle1/cmd_vel", 1);
}

void TeleopTurtle::joyCallback(const sensor_msgs::Joy::ConstPtr& joy)
{
    geometry_msgs::Twist twist;
    if (joy->buttons[ang_boost] > 0){
      twist.angular.z = 2 * a_scale_*joy->axes[vel_angular];
    }else{
      twist.angular.z = a_scale_*joy->axes[vel_angular];
    }

    if (joy->buttons[lin_boost] > 0){
      twist.linear.x = 2 * l_scale_*joy->axes[vel_linear];
    }else{
      twist.linear.x = l_scale_*joy->axes[vel_linear];
    }
    
    ROS_INFO_STREAM("(" << joy->axes[vel_linear] << " " << joy->axes[vel_angular] << ")");
    vel_pub_.publish(twist);
}

int main(int argc, char** argv)
{
    ros::init(argc, argv, "teleop_turtle");
    TeleopTurtle teleop_turtle;

    ros::spin();
    return 0;
}

launch

<launch>

    <!-- Turtlesim Node -->
    <node pkg="turtlesim" type="turtlesim_node" name="sim"/>

    <!-- joy node -->
    <node pkg="joy" type="joy_node" name="teleopJoy" output="screen">
      <param name="dev" type="string" value="/dev/input/js0" />
      <param name="deadzone"          value="0.12" />
      <param name="autorepeat_rate"   value="1.0" />
      <param name="coalesce_interval" value="0.0" />
    </node>

    <!-- define buttons & Axes for fami con Pad-->
    <param name="axis_linear"   value="1"   type="int" />
    <param name="axis_angular"  value="0"   type="int" />
    <param name="linear_boost"   value="0"   type="int" />
    <param name="angular_boost"   value="1"   type="int" />
    <param name="scale_linear"  value="3.0" type="double" />
    <param name="scale_angular" value="2.0" type="double" />

    <node pkg="my_joystick" type="my_joystick" name="my_joystick" 
    launch-prefix="xterm -font r14 -bg darkblue -geometry 113x30+503+80 -e"
    output="screen" required="true" 
    />    

</launch>

CMakeLists.txt

cmake_minimum_required(VERSION 2.8.3)
project(my_joystick)

find_package(catkin REQUIRED COMPONENTS
  geometry_msgs
  roscpp
  sensor_msgs
  std_msgs
)

catkin_package(
  CATKIN_DEPENDS geometry_msgs roscpp sensor_msgs std_msgs
)

include_directories(
  include
  ${catkin_INCLUDE_DIRS}
)

add_executable(my_joystick src/my_joystick.cpp)
target_link_libraries(my_joystick
  ${catkin_LIBRARIES}
)

package.xml

  <buildtool_depend>catkin</buildtool_depend>

  <build_depend>geometry_msgs</build_depend>
  <build_depend>roscpp</build_depend>
  <build_depend>sensor_msgs</build_depend>
  <build_depend>std_msgs</build_depend>
  <build_depend>turtlesim</build_depend>
  <build_depend>sensor_msgs</build_depend>
  <build_depend>joy</build_depend>

  <run_depend>geometry_msgs</run_depend>
  <run_depend>roscpp</run_depend>
  <run_depend>sensor_msgs</run_depend>
  <run_depend>std_msgs</run_depend>
  <run_depend>turtlesim</run_depend>
  <run_depend>sensor_msgs</run_depend>
  <run_depend>joy</run_depend>

Auto-starting processes with systemd

Goal

Automatically start several processes when the PC boots.

Procedure

Caution

If the script launches a GUI or keeps throwing errors, it will not behave correctly, so watch out!

In that case, check the logs with something like $ journalctl -b | less!!

Create a shell script

boot_all.sh

#!/bin/sh

pass="pass"
# -S lets sudo read the password from stdin
echo $pass | sudo -S /home/nvidia/jetson_clocks.sh
cd '/home/nvidia/src'
/home/nvidia/openpose/bin/python /home/nvidia/src/track_face.py

Create a service script for systemd

  • cd /etc/systemd/system

  • emacs -nw test.service

[Unit]
Description = test service

[Service]
ExecStart = /home/nvidia/boot_all.sh
Restart = always
Type = simple

[Install]
WantedBy = multi-user.target

Register the service for automatic start

  • sudo systemctl enable test

Use disable to unregister.

Check

  • sudo systemctl start test

  • sudo systemctl status test

References

Playing with a clap sensor

Goal

Play around with a clap sensor.

Hand Clap Sensor VM-Clap1 - Elmwood Electronics

f:id:robonchu:20180211100212j:plain

Trying out the clap sensor

Clap on 👏👏, Clap off 👏👏 - Elmwood Electronics

Everything needed was covered here 👆

f:id:robonchu:20180211100158p:plain

Excerpt

/*
    Verbal Machines VM-CLAP1 sensor test
    clap twice within ¼ second to turn Arduino LED on or off

    by  scruss - 2017-06
    for Elmwood Electronics - https://elmwood.to/

    Sensor wiring:

      OUT   → Arduino Pin 2 (use INPUT_PULLUP)
      PWR   → 5V or 3.3V
      GND   → GND
*/

#define CLAPIN  2             // pin must be interrupt-capable
#define CLAP_DELAY   250      // max gap between claps to trigger

volatile boolean clap = false;                // clap detected state
boolean led_state = false;                    // LED on/off state
unsigned long clap_time, last_clap_time = 0;  // clap time records

void setup() {
  pinMode(CLAPIN, INPUT_PULLUP);
  pinMode(LED_BUILTIN, OUTPUT);     // control built-in LED by clapping
  Serial.begin(9600);
  Serial.println("# Clap sensor test ...");
  attachInterrupt(                  // register Interrupt Service Routine (ISR):
    digitalPinToInterrupt(CLAPIN),  //   pin to watch for interrupt
    heard_clap,                     //   void function to call on interrupt
    FALLING                         //   trigger interrupt on HIGH → LOW change
  );
}

void loop() {
  digitalWrite(LED_BUILTIN, led_state);  // set LED based on clap status
  if (clap) {                       // we heard a clap from ISR
    clap = false;                   // make sure we don't trigger again too soon
    last_clap_time = clap_time;     // store old clap time
    clap_time = millis();           // note current clap time
    if (clap_time - last_clap_time < CLAP_DELAY) {  // if two claps heard in ¼ s:
      Serial.println("clap clap!");                 //   notify
      led_state = !led_state;                       //   and switch LED state
    }
    else {
      Serial.println("clap!");                      // notify of only one clap
    }
  }
}

void heard_clap() {
  clap = true;      // just set clap state in ISR
}

Check with the serial monitor

f:id:robonchu:20180211100802p:plain

It is robust to noise too. Nice ♪

Implementation on the PC side

A script that prints "empty" when nothing has happened and prints the message when a clap occurs.

The serial read runs in its own thread; main keeps working, and when a clap arrives a value is put on the Queue and passed to main.

#!/usr/bin/env python
# coding: utf-8

import serial
import time
import threading
from Queue import Queue, Empty

class Thread_cls(threading.Thread):
    def run(self):
        while True: 
            read_ser = ser.readline().rstrip()
            q.put(read_ser)

if __name__ == '__main__':
    print "hello serial!"
    ser = serial.Serial()
    ser.port = '/dev/ttyUSB0'
    ser.baudrate = 9600
    ser.open()
    time.sleep(3.0)

    q = Queue()

    th = Thread_cls()
    th.daemon = True
    th.start()

    while True:
        try:
            task = q.get(False)
            print task
        except Empty:
            print "empty"