drivers/uavcan: subtree merge last working libuavcan (preserving history)

- upstream libuavcan was broken and then marked deprecated; this fully absorbs the submodule (renamed to libdronecan to deconflict) up to our last good working commit, and all commit history is kept
 - fixes https://github.com/PX4/PX4-Autopilot/issues/23727 (regression introduced in #23113)
 - this puts us in a much better position to evolve the library as needed now that we have full control
This commit is contained in:
Daniel Agar
2024-10-16 11:49:59 -04:00
238 changed files with 40766 additions and 46 deletions
+22 -20
View File
@@ -31,12 +31,14 @@
#
############################################################################
set(LIBUAVCAN_DIR ${PX4_SOURCE_DIR}/src/drivers/uavcan/libuavcan)
set(LIBUAVCAN_DIR_DRIVERS ${PX4_SOURCE_DIR}/src/drivers/uavcan/uavcan_drivers)
set(LIBDRONECAN_DIR ${PX4_SOURCE_DIR}/src/drivers/uavcan/libdronecan)
set(LIBDRONECAN_DIR_DRIVERS ${PX4_SOURCE_DIR}/src/drivers/uavcan/uavcan_drivers)
px4_add_git_submodule(TARGET git_uavcan PATH ${LIBUAVCAN_DIR})
set(DSDLC_DIR "${PX4_SOURCE_DIR}/src/drivers/uavcan/libdronecan/dsdl")
px4_add_git_submodule(TARGET git_uavcan_dsdl PATH ${DSDLC_DIR})
px4_add_git_submodule(TARGET git_uavcan_pydronecan PATH ${LIBDRONECAN_DIR}/libuavcan/dsdl_compiler/pydronecan)
set(UAVCAN_USE_CPP03 ON CACHE BOOL "uavcan cpp03")
set(UAVCAN_PLATFORM "generic")
if(CONFIG_ARCH_CHIP)
@@ -90,25 +92,24 @@ add_compile_options(
-Wno-address-of-packed-member
)
set(CMAKE_WARN_DEPRECATED OFF CACHE BOOL "" FORCE) # silence libuavcan deprecation warning for now (TODO: fix and remove)
add_subdirectory(${LIBUAVCAN_DIR} libuavcan EXCLUDE_FROM_ALL)
add_subdirectory(${LIBDRONECAN_DIR} libdronecan EXCLUDE_FROM_ALL)
add_dependencies(uavcan prebuild_targets)
# driver
add_subdirectory(${LIBUAVCAN_DIR_DRIVERS}/${UAVCAN_DRIVER}/driver libuavcan_drivers EXCLUDE_FROM_ALL)
add_subdirectory(${LIBDRONECAN_DIR_DRIVERS}/${UAVCAN_DRIVER}/driver libuavcan_drivers EXCLUDE_FROM_ALL)
target_include_directories(uavcan_${UAVCAN_DRIVER}_driver PUBLIC
${LIBUAVCAN_DIR}/libuavcan/include
${LIBUAVCAN_DIR}/libuavcan/include/dsdlc_generated
${LIBDRONECAN_DIR}/libuavcan/include
${LIBDRONECAN_DIR}/libuavcan/include/dsdlc_generated
)
# generated DSDL
set(DSDLC_DIR "${PX4_SOURCE_DIR}/src/drivers/uavcan/dsdl")
set(DSDLC_INPUTS
"${LIBUAVCAN_DIR}/dsdl/ardupilot"
"${LIBUAVCAN_DIR}/dsdl/com"
"${LIBUAVCAN_DIR}/dsdl/cuav"
"${LIBUAVCAN_DIR}/dsdl/dronecan"
"${LIBUAVCAN_DIR}/dsdl/uavcan"
"${DSDLC_DIR}/ardupilot"
"${DSDLC_DIR}/com"
"${DSDLC_DIR}/cuav"
"${DSDLC_DIR}/dronecan"
"${DSDLC_DIR}/uavcan"
)
set(DSDLC_OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/include/dsdlc_generated")
@@ -119,7 +120,7 @@ foreach(DSDLC_INPUT ${DSDLC_INPUTS})
endforeach(DSDLC_INPUT)
add_custom_command(OUTPUT px4_uavcan_dsdlc_run.stamp
COMMAND
${PYTHON_EXECUTABLE} ${LIBUAVCAN_DIR}/libuavcan/dsdl_compiler/libuavcan_dsdlc
${PYTHON_EXECUTABLE} ${LIBDRONECAN_DIR}/libuavcan/dsdl_compiler/libuavcan_dsdlc
--outdir ${DSDLC_OUTPUT} ${DSDLC_INPUTS}
#--verbose
COMMAND ${CMAKE_COMMAND} -E touch px4_uavcan_dsdlc_run.stamp
@@ -138,10 +139,10 @@ px4_add_module(
#-DDEBUG_BUILD
INCLUDES
${DSDLC_OUTPUT}
${LIBUAVCAN_DIR}/libuavcan/include
${LIBUAVCAN_DIR}/libuavcan/include/dsdlc_generated
${LIBUAVCAN_DIR}/libuavcan_drivers/posix/include
${LIBUAVCAN_DIR_DRIVERS}/${UAVCAN_DRIVER}/driver/include
${LIBDRONECAN_DIR}/libuavcan/include
${LIBDRONECAN_DIR}/libuavcan/include/dsdlc_generated
${LIBDRONECAN_DIR_DRIVERS}/posix/include
${LIBDRONECAN_DIR_DRIVERS}/${UAVCAN_DRIVER}/driver/include
SRCS
arming_status.cpp
arming_status.hpp
@@ -191,7 +192,8 @@ px4_add_module(
mixer_module
version
git_uavcan
git_uavcan_dsdl
git_uavcan_pydronecan
uavcan_${UAVCAN_DRIVER}_driver
drivers_rangefinder # Fix undefined reference when no distance sensors are selected
+30
View File
@@ -0,0 +1,30 @@
# Build outputs
*.o
*.d
lib*.so
lib*.so.*
*.a
build*/
.dep
__pycache__
*.pyc
# Eclipse
.metadata
.settings
.project
.cproject
.pydevproject
.gdbinit
# Visual Studio Code
.vscode
# vagrant
.vagrant
# libuavcan DSDL compiler default output directory
dsdlc_generated
# Log files
*.log
@@ -0,0 +1,113 @@
#
# Copyright (C) 2014 Pavel Kirienko <pavel.kirienko@gmail.com>
#
cmake_minimum_required(VERSION 3.9)
project(uavcan C CXX)

#
# Build options
#
# Linux is the only platform with an automatic default; everything else falls
# back to "generic" via the options table below.
if(${CMAKE_SYSTEM_NAME} STREQUAL "Linux")
    set(DEFAULT_UAVCAN_PLATFORM "linux")
endif()
# NOTE(review): `if(${CMAKE_SYSTEM_NAME} ...)` double-dereferences the variable;
# the conventional, safer spelling is `if(CMAKE_SYSTEM_NAME STREQUAL "Linux")`.

# options are listed in a table format below
# Each entry is one ':'-delimited string:
#   name : type : default value : space-separated options list : description
set(opts
    # name: type: default value: string options list : description
    "CMAKE_BUILD_TYPE:STRING:RelWithDebInfo:Debug Release RelWithDebInfo MinSizeRel:Build type."
    "CMAKE_CXX_FLAGS:STRING:::C++ flags."
    "CMAKE_C_FLAGS:STRING:::C flags."
    "UAVCAN_PLATFORM:STRING:generic:generic kinetis linux stm32:Platform."
    "CONTINUOUS_INTEGRATION_BUILD:BOOL:OFF::Disable error redirection and timing tests"
    "UAVCAN_CMAKE_VERBOSE:BOOL:OFF::Verbose CMake configure output"
)
# Expand the options table: every entry becomes a cache variable, seeded from
# an already-set value, a DEFAULT_<name> override, or the table default.
foreach(_opt ${opts})
    # arguments are : delimited
    string(REPLACE ":" ";" _opt ${_opt})
    list(GET _opt 0 _name)
    list(GET _opt 1 _type)
    list(GET _opt 2 _default)
    list(GET _opt 3 _options)
    list(GET _opt 4 _descr)
    # options are space delimited
    string(REPLACE " " ";" _options "${_options}")
    # if a default has not already been defined, use default from table
    if(NOT DEFINED DEFAULT_${_name})
        set(DEFAULT_${_name} ${_default})
    endif()
    # option has not been set already or it is empty, set it with the default
    if(NOT DEFINED ${_name} OR ${_name} STREQUAL "")
        set(${_name} ${DEFAULT_${_name}})
    endif()
    # create a cache from the variable and force it to set
    # NOTE(review): FORCE rewrites the cache entry on every configure; it is
    # what makes the computed value win, but it also stomps values edited in
    # the cache between runs.
    if(UAVCAN_CMAKE_VERBOSE)
        message(STATUS "${_name}\t: ${${_name}} : ${_descr}")
    endif()
    set("${_name}" "${${_name}}" CACHE "${_type}" "${_descr}" FORCE)
    # if an options list is provided for the cache, set it (shown as a
    # drop-down in cmake-gui/ccmake)
    if("${_type}" STREQUAL "STRING" AND NOT "${_options}" STREQUAL "")
        set_property(CACHE ${_name} PROPERTY STRINGS ${_options})
    endif()
endforeach()
#
# Set flags
#
# NOTE(review): include_directories() is directory-scoped and leaks into every
# target defined below; target_include_directories() would be preferable.
include_directories(
    ./libuavcan/include/
    ./libuavcan/include/dsdlc_generated
)

#
# Install
#
# DSDL definitions are installed as shared data so external tooling can
# reference the standard namespaces.
install(DIRECTORY dsdl DESTINATION share/uavcan)
#
# Googletest
#
# Tests are only wanted for debug builds; release builds skip the download.
if( CMAKE_BUILD_TYPE STREQUAL "Debug" )
    # (Taken from googletest/README.md documentation)
    # GTest executables
    # Download and unpack googletest at configure time
    configure_file(CMakeLists.txt.in googletest-download/CMakeLists.txt)
    execute_process(COMMAND ${CMAKE_COMMAND} -G "${CMAKE_GENERATOR}" .
        RESULT_VARIABLE result
        WORKING_DIRECTORY ${CMAKE_BINARY_DIR}/googletest-download )
    if(result)
        # Download/configure failures are non-fatal: GTEST_FOUND stays unset
        # and the library still builds, just without tests.
        message(WARNING "CMake step for googletest failed: ${result}")
    else()
        execute_process(COMMAND ${CMAKE_COMMAND} --build .
            RESULT_VARIABLE result
            WORKING_DIRECTORY ${CMAKE_BINARY_DIR}/googletest-download )
        if(result)
            message(WARNING "Build step for googletest failed: ${result}")
        else()
            # Prevent overriding the parent project's compiler/linker
            # settings on Windows
            set(gtest_force_shared_crt ON CACHE BOOL "" FORCE)

            # Add googletest directly to our build. This defines
            # the gtest and gtest_main targets.
            add_subdirectory(${CMAKE_BINARY_DIR}/googletest-src
                ${CMAKE_BINARY_DIR}/googletest-build
                EXCLUDE_FROM_ALL)
            set(GTEST_FOUND ON)
            set(BUILD_TESTING ON)
            enable_testing()
        endif()
    endif()
endif()

#
# Subdirectories
#
# library
add_subdirectory(libuavcan)

# vim: set et ft=cmake fenc=utf-8 ff=unix sts=4 sw=4 ts=4 :
@@ -0,0 +1,15 @@
# Helper project used to fetch googletest at configure time.  The parent
# CMakeLists configure_file()s this into the build tree and drives it with
# execute_process(); ExternalProject then clones gtest into the binary dir.
cmake_minimum_required(VERSION 3.9)
project(googletest-download NONE)

include(ExternalProject)

# Clone googletest pinned to a known-good commit.  Configure/build/install/test
# steps are disabled because the parent project add_subdirectory()s the
# downloaded source tree directly.
ExternalProject_Add(googletest
    GIT_REPOSITORY https://github.com/google/googletest.git
    GIT_TAG ba96d0b1161f540656efdaed035b3c062b60e006
    SOURCE_DIR "${CMAKE_BINARY_DIR}/googletest-src"
    BINARY_DIR "${CMAKE_BINARY_DIR}/googletest-build"
    CONFIGURE_COMMAND ""
    BUILD_COMMAND ""
    INSTALL_COMMAND ""
    TEST_COMMAND ""
)
+20
View File
@@ -0,0 +1,20 @@
The MIT License (MIT)
Copyright (c) 2014 Pavel Kirienko
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+120
View File
@@ -0,0 +1,120 @@
DroneCAN stack in C++
=====================
Portable reference implementation of the [DroneCAN protocol stack](http://dronecan.org) in C++ for embedded systems
and Linux.
DroneCAN is a lightweight protocol designed for reliable communication in aerospace and robotic applications via CAN bus.
## Documentation
* [DroneCAN website](http://dronecan.org)
* [DroneCAN forum](https://dronecan.org/discord)
## Library usage
### Cloning the repository
```bash
git clone https://github.com/DroneCAN/libuavcan
cd libuavcan
git submodule update --init
```
If this repository is used as a git submodule in your project, make sure to use `--recursive` when updating it.
### Using in a Linux application
Libuavcan can be built as a static library and installed on the system globally as shown below.
```bash
mkdir build
cd build
cmake .. # Default build type is RelWithDebInfo, which can be overriden if needed.
make -j8
sudo make install
```
The following components will be installed:
* Libuavcan headers and the static library
* Generated DSDL headers
* Libuavcan DSDL compiler (a Python script named `libuavcan_dsdlc`)
* Libuavcan DSDL compiler's support library (a Python package named `libuavcan_dsdl_compiler`)
Note that Pyuavcan (an implementation of DroneCAN in Python) will not be installed.
You will need to install it separately if you intend to use the Libuavcan's DSDL compiler in your applications.
It is also possible to use the library as a submodule rather than installing it system-wide.
Please refer to the example applications supplied with the Linux platform driver for more information.
### Using with an embedded system
For ARM targets, it is recommended to use [GCC ARM Embedded](https://launchpad.net/gcc-arm-embedded);
however, any other standard-compliant C++ compiler should also work.
## Library development
Despite the fact that the library itself can be used on virtually any platform that has a standard-compliant
C++11 compiler, the library development process assumes that the host OS is Linux.
Prerequisites:
* Google test library for C++ - gtest (downloaded as part of the build from [github](https://github.com/google/googletest))
* C++11 capable compiler with GCC-like interface (e.g. GCC, Clang)
* CMake 3.9+
* Optional: static analysis tool for C++ - cppcheck (on Debian/Ubuntu use package `cppcheck`)
Building the debug version and running the unit tests:
```bash
mkdir build
cd build
cmake .. -DCMAKE_BUILD_TYPE=Debug
make -j8
make ARGS=-VV test
```
Test outputs can be found in the build directory under `libuavcan`.
> Note that unit tests suffixed with "_RealTime" must be executed in real time, otherwise they may produce false warnings;
this implies that they will likely fail if run on a virtual machine or on a highly loaded system.
### Vagrant
Vagrant can be used to setup a compatible Ubuntu virtual image. Follow the instructions on [Vagrantup](https://www.vagrantup.com/) to install virtualbox and vagrant then do:
```bash
vagrant up
vagrant ssh
mkdir build
cd build
mkdir build && cd build && cmake .. -DCMAKE_BUILD_TYPE=Debug -DCONTINUOUS_INTEGRATION_BUILD=1
```
> Note that -DCONTINUOUS_INTEGRATION_BUILD=1 is required for this build as the realtime unit tests will not work on a virtual machine.
You can build using commands like:
```bash
vagrant ssh -c "cd /vagrant/build && make -j4 && make test"
```
or to run a single test:
```bash
vagrant ssh -c "cd /vagrant/build && make libuavcan_test && ./libuavcan/libuavcan_test --gtest_filter=Node.Basic"
```
### Developing with Eclipse
An Eclipse project can be generated like that:
```bash
cmake ../../libuavcan -G"Eclipse CDT4 - Unix Makefiles" \
-DCMAKE_ECLIPSE_VERSION=4.3 \
-DCMAKE_BUILD_TYPE=Debug \
-DCMAKE_CXX_COMPILER_ARG1=-std=c++11
```
Path `../../libuavcan` in the command above points at the directory where the top-level `CMakeLists.txt` is located;
you may need to adjust this per your environment.
Note that the directory where Eclipse project is generated must not be a descendant of the source directory.
@@ -0,0 +1,146 @@
#
# Copyright (C) 2014 Pavel Kirienko <pavel.kirienko@gmail.com>
#
cmake_minimum_required(VERSION 3.9)

# Honor a caller-supplied CMAKE_BUILD_TYPE; otherwise default to RelWithDebInfo.
if(DEFINED CMAKE_BUILD_TYPE)
    set(CMAKE_BUILD_TYPE ${CMAKE_BUILD_TYPE} CACHE STRING "Debug Release RelWithDebInfo MinSizeRel")
else()
    set(CMAKE_BUILD_TYPE RelWithDebInfo CACHE STRING "Debug Release RelWithDebInfo MinSizeRel")
endif()

# Detecting whether we need to add debug targets
# (TOLOWER makes the comparison case-insensitive)
string(TOLOWER "${CMAKE_BUILD_TYPE}" build_type_lower)
if (build_type_lower STREQUAL "debug")
    set(DEBUG_BUILD 1)
    message(STATUS "Debug build")
else ()
    set(DEBUG_BUILD 0)
endif ()

project(libuavcan)

# Python is needed to run the DSDL compiler (see custom command below).
find_package(PythonInterp)

# The warning flags used below are only valid for GCC-compatible front ends.
if ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU" OR "${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang")
    set(COMPILER_IS_GCC_COMPATIBLE 1)
else ()
    set(COMPILER_IS_GCC_COMPATIBLE 0)
endif ()
#
# DSDL compiler invocation
# Probably output files should be saved into CMake output dir?
#
set(DSDLC_INPUTS "test/dsdl_test/root_ns_a" "test/dsdl_test/root_ns_b" "${CMAKE_CURRENT_SOURCE_DIR}/../dsdl/uavcan")
set(DSDLC_OUTPUT "include/dsdlc_generated")

# Collect every .uavcan definition so the custom command reruns when any change.
# NOTE(review): there is no RELATIVE keyword here, so ${CMAKE_CURRENT_SOURCE_DIR}
# is itself treated as an extra glob pattern by file(GLOB_RECURSE);
# `RELATIVE ${CMAKE_CURRENT_SOURCE_DIR}` was probably intended — confirm.
set(DSDLC_INPUT_FILES "")
foreach(DSDLC_INPUT ${DSDLC_INPUTS})
    file(GLOB_RECURSE DSDLC_NEW_INPUT_FILES ${CMAKE_CURRENT_SOURCE_DIR} "${DSDLC_INPUT}/*.uavcan")
    set(DSDLC_INPUT_FILES ${DSDLC_INPUT_FILES} ${DSDLC_NEW_INPUT_FILES})
endforeach(DSDLC_INPUT)

# A stamp file records a successful compiler run; the libuavcan_dsdlc target
# is what library targets depend on to get the generated headers.
add_custom_command(OUTPUT ${CMAKE_BINARY_DIR}/libuavcan_dsdlc_run.stamp
    COMMAND ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/dsdl_compiler/libuavcan_dsdlc ${DSDLC_INPUTS} -O${DSDLC_OUTPUT}
    COMMAND ${CMAKE_COMMAND} -E touch ${CMAKE_BINARY_DIR}/libuavcan_dsdlc_run.stamp
    DEPENDS ${DSDLC_INPUT_FILES}
    WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
    COMMENT "Running dsdl compiler")
add_custom_target(libuavcan_dsdlc DEPENDS ${CMAKE_BINARY_DIR}/libuavcan_dsdlc_run.stamp)
include_directories(${DSDLC_OUTPUT})
#
# Compiler flags
#
# NOTE(review): flags are appended to CMAKE_CXX_FLAGS at directory scope;
# target_compile_options()/target_compile_features() would be target-scoped.
if (COMPILER_IS_GCC_COMPATIBLE)
    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall -Wextra -Wundef")
    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11")
endif ()

if (DEBUG_BUILD)
    add_definitions(-DUAVCAN_DEBUG=1)
endif ()

include_directories(include)

#
# libuavcan
#
# NOTE(review): file(GLOB_RECURSE) misses newly added sources until the next
# reconfigure; an explicit source list would avoid stale builds.
file(GLOB_RECURSE LIBUAVCAN_CXX_FILES RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} "src/*.cpp")
add_library(uavcan STATIC ${LIBUAVCAN_CXX_FILES})
add_dependencies(uavcan libuavcan_dsdlc) # generated headers must exist first

install(TARGETS uavcan DESTINATION lib)
install(DIRECTORY include/uavcan DESTINATION include)
install(DIRECTORY include/dsdlc_generated/uavcan DESTINATION include) # Generated and lib's .hpp
#
# Tests and static analysis - only for debug builds
#
# Build a GTest executable from all test/*.cpp sources, link it against the
# given library flavour, and register it with CTest.
#   name    - executable and test name
#   library - uavcan library target to depend on and link
#   flags   - extra COMPILE_FLAGS for this flavour ("" for none)
function(add_libuavcan_test name library flags) # Adds GTest executable and creates target to execute it every build
    find_package(Threads REQUIRED)
    file(GLOB_RECURSE TEST_CXX_FILES RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} "test/*.cpp")
    add_executable(${name} ${TEST_CXX_FILES})
    add_dependencies(${name} ${library})
    if (flags)
        set_target_properties(${name} PROPERTIES COMPILE_FLAGS ${flags})
    endif ()
    target_link_libraries(${name} gmock_main)
    target_link_libraries(${name} ${library})
    if (${UAVCAN_PLATFORM} STREQUAL "linux")
        target_link_libraries(${name} rt) # POSIX clock/timer APIs live in librt on Linux
    endif()
    # Tests run automatically upon successful build
    # If failing tests need to be investigated with debugger, use 'make --ignore-errors'
    if (CONTINUOUS_INTEGRATION_BUILD)
        # Don't redirect test output, and don't run tests suffixed with "RealTime"
        add_test(NAME ${name}
            COMMAND ${name} --gtest_filter=-*RealTime
            WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR})
    else ()
        # NOTE(review): add_test() does not run the command through a shell,
        # so the 1>/2>&1 tokens are likely passed to the test binary as plain
        # arguments rather than redirecting its output — confirm.
        add_test(NAME ${name}
            COMMAND ${name} 1>"${name}.log" 2>&1
            WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR})
    endif()
endfunction()
# Debug builds get strict warnings, an extra optimized library flavour, and
# the unit tests (when gtest was downloaded successfully above the project).
if (DEBUG_BUILD)
    message(STATUS "Debug build (note: requires gtest)")
    if (COMPILER_IS_GCC_COMPATIBLE)
        # No such thing as too many warnings
        set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall -Wextra -Werror -pedantic -Wfloat-equal -Wconversion")
        set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wsign-conversion -Wcast-align -Wmissing-declarations -Wlogical-op")
        set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wdouble-promotion -Wswitch-enum -Wtype-limits -Wno-error=array-bounds")
        set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wzero-as-null-pointer-constant -Wnon-virtual-dtor")
        set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Woverloaded-virtual -Wsign-promo -Wold-style-cast")
        set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-error=deprecated-declarations")
        # Flags for the max-optimization library flavour tested below
        set(optim_flags "-O3 -DNDEBUG -g0")
    else ()
        message(STATUS "Compiler ID: ${CMAKE_CXX_COMPILER_ID}")
        message(FATAL_ERROR "This compiler cannot be used to build tests; use release build instead.")
    endif ()

    # Additional flavours of the library
    add_library(uavcan_optim STATIC ${LIBUAVCAN_CXX_FILES})
    set_target_properties(uavcan_optim PROPERTIES COMPILE_FLAGS ${optim_flags})
    add_dependencies(uavcan_optim libuavcan_dsdlc)

    if (GTEST_FOUND)
        message(STATUS "GTest found, tests will be built and run.")
        add_libuavcan_test(libuavcan_test uavcan "") # Default
        add_libuavcan_test(libuavcan_test_optim uavcan_optim "${optim_flags}") # Max optimization
    else (GTEST_FOUND)
        message(STATUS "GTest was not found, tests will not be built")
    endif (GTEST_FOUND)
else ()
    message(STATUS "Release build type: " ${CMAKE_BUILD_TYPE})
endif ()

# vim: set et ft=cmake fenc=utf-8 ff=unix sts=4 sw=4 ts=4 :
+18
View File
@@ -0,0 +1,18 @@
#!/bin/sh
#
# cppcheck static analysis
# For Debian based: apt-get install cppcheck
#

# Detect the number of CPU cores so cppcheck can run one thread per core.
# /proc/cpuinfo exists only on Linux; grep's stderr is silenced so other
# platforms just take the fallback below instead of printing an error first.
num_cores=$(grep -c "^processor" /proc/cpuinfo 2>/dev/null)
if [ -z "$num_cores" ]; then
	echo "Hey, it looks like we're not on Linux. Please fix this script to add support for this OS."
	num_cores=4
fi
echo "Number of threads for cppcheck: $num_cores"

# TODO: with future versions of cppcheck, add --library=glibc
# "$num_cores" and "$@" are quoted to prevent word splitting of the thread
# count and of any caller-supplied arguments containing spaces.
cppcheck . --error-exitcode=1 --quiet --enable=all --platform=unix64 --std=c99 --std=c++11 \
	--inline-suppr --force --template=gcc -j"$num_cores" \
	-U__BIGGEST_ALIGNMENT__ -UUAVCAN_MEM_POOL_BLOCK_SIZE -UBIG_ENDIAN -UBYTE_ORDER \
	-Iinclude "$@"
@@ -0,0 +1,313 @@
#!/usr/bin/env python
#
# UAVCAN DSDL compiler for libuavcan
#
# Copyright (C) 2014 Pavel Kirienko <pavel.kirienko@gmail.com>
#
'''
This module implements the core functionality of the UAVCAN DSDL compiler for libuavcan.
Supported Python versions: 3.2+, 2.7.
It accepts a list of root namespaces and produces the set of C++ header files for libuavcan.
It is based on the DSDL parsing package from pyuavcan.
'''
from __future__ import division, absolute_import, print_function, unicode_literals
import sys, os, logging, errno, re
from .pyratemp import Template
from dronecan import dsdl
# Python 2.7 compatibility: make `str` mean text on both major versions
# (on Python 3 the name `unicode` does not exist, so the shim is a no-op).
try:
    str = unicode
except NameError:
    pass

# Extension of the generated C++ headers.
OUTPUT_FILE_EXTENSION = 'hpp'
OUTPUT_FILE_PERMISSIONS = 0o444  # Read only for all
# The pyratemp template used to render every generated header.
TEMPLATE_FILENAME = os.path.join(os.path.dirname(__file__), 'data_type_template.tmpl')

__all__ = ['run', 'logger', 'DsdlCompilerException']
class DsdlCompilerException(Exception):
    '''Raised for any fatal DSDL compiler error (parsing or generation); see die().'''
    pass

logger = logging.getLogger(__name__)
def run(source_dirs, include_dirs, output_dir):
    '''
    This function takes a list of root namespace directories (containing DSDL definition files to parse), a
    possibly empty list of search directories (containing DSDL definition files that can be referenced from the types
    that are going to be parsed), and the output directory path (possibly nonexistent) where the generated C++
    header files will be stored.

    Note that this module features lazy write, i.e. if an output file does already exist and its content is not going
    to change, it will not be overwritten. This feature allows to avoid unnecessary recompilation of dependent object
    files.

    Args:
        source_dirs    List of root namespace directories to parse.
        include_dirs   List of root namespace directories with referenced types (possibly empty). This list is
                       automatically extended with source_dirs.
        output_dir     Output directory path. Will be created if doesn't exist.

    Raises:
        DsdlCompilerException  If parsing fails or no type definitions were found.
    '''
    assert isinstance(source_dirs, list)
    assert isinstance(include_dirs, list)
    output_dir = str(output_dir)
    types = run_parser(source_dirs, include_dirs + source_dirs)
    if not types:
        die('No type definitions were found')
    logger.info('%d types total', len(types))
    run_generator(types, output_dir)
# -----------------
def pretty_filename(filename):
    '''Return the shortest unambiguous rendering of a path for log messages:
    the relative form when it stays inside the current directory, otherwise
    the absolute form. Falls back to the input unchanged when a relative path
    cannot be computed (e.g. different drives on Windows).'''
    try:
        absolute = os.path.abspath(filename)
        relative = os.path.relpath(filename)
    except ValueError:
        return filename
    return relative if '..' not in relative else absolute
def type_output_filename(t):
    '''Relative path of the generated header for the compound type ``t``,
    e.g. full name "uavcan.protocol.NodeStatus" maps to
    "uavcan/protocol/NodeStatus.hpp" (using the platform path separator).'''
    assert t.category == t.CATEGORY_COMPOUND
    name_as_path = os.path.sep.join(t.full_name.split('.'))
    return '%s.%s' % (name_as_path, OUTPUT_FILE_EXTENSION)
def makedirs(path):
    '''Create ``path`` and any missing parents; silently succeed if it already
    exists. Works on both Python 3 (exist_ok) and Python 2.7 (EEXIST check).'''
    try:
        # May throw "File exists" when executed as root, which is wrong
        os.makedirs(path, exist_ok=True)
    except TypeError:
        # Python 2.7: no exist_ok keyword, so swallow EEXIST manually
        try:
            os.makedirs(path)
        except OSError as ex:
            if ex.errno != errno.EEXIST:  # http://stackoverflow.com/questions/12468022
                raise
    except OSError as ex:
        if ex.errno != errno.EEXIST:
            raise
def die(text):
    '''Abort compilation by raising DsdlCompilerException with the given message.'''
    raise DsdlCompilerException(str(text))
def run_parser(source_dirs, search_dirs):
    # Parse all DSDL namespaces; a dsdl.DsdlException is converted into a
    # DsdlCompilerException via die() so callers deal with one error type.
    # The full traceback goes to the logger at INFO level.
    try:
        types = dsdl.parse_namespaces(source_dirs, search_dirs)
    except dsdl.DsdlException as ex:
        logger.info('Parser failure', exc_info=True)
        die(ex)
    return types
def run_generator(types, dest_dir):
    # Render every parsed type into <dest_dir>/<namespace path>/<TypeName>.hpp.
    # Any failure is downgraded to DsdlCompilerException via die(); the full
    # traceback goes to the logger at INFO level.
    try:
        template_expander = make_template_expander(TEMPLATE_FILENAME)
        dest_dir = os.path.abspath(dest_dir)  # Removing '..'
        makedirs(dest_dir)
        for t in types:
            logger.info('Generating type %s', t.full_name)
            filename = os.path.join(dest_dir, type_output_filename(t))
            text = generate_one_type(template_expander, t)
            write_generated_data(filename, text)
    except Exception as ex:
        logger.info('Generator failure', exc_info=True)
        die(ex)
def write_generated_data(filename, data):
    # Write one generated header, creating parent directories as needed.
    # Lazy update - file will not be rewritten if its content is not going to change,
    # which avoids needless recompilation of dependent object files.
    dirname = os.path.dirname(filename)
    makedirs(dirname)

    if os.path.exists(filename):
        with open(filename) as f:
            existing_data = f.read()
        if data == existing_data:
            logger.info('Up to date [%s]', pretty_filename(filename))
            return
        logger.info('Rewriting [%s]', pretty_filename(filename))
        # Remove first: the previous run chmod'ed the file to 0o444 below,
        # so opening it for writing in place would fail.
        os.remove(filename)
    else:
        logger.info('Creating [%s]', pretty_filename(filename))

    # Full rewrite
    with open(filename, 'w') as f:
        f.write(data)
    try:
        # Make the output read-only to discourage hand-editing generated code
        os.chmod(filename, OUTPUT_FILE_PERMISSIONS)
    except (OSError, IOError) as ex:
        logger.warning('Failed to set permissions for %s: %s', pretty_filename(filename), ex)
def type_to_cpp_type(t):
    # Translate a parsed DSDL type into the corresponding libuavcan C++
    # marshalling template instantiation string (FloatSpec/IntegerSpec/Array,
    # or a fully qualified name for nested compound types).
    if t.category == t.CATEGORY_PRIMITIVE:
        cast_mode = {
            t.CAST_MODE_SATURATED: '::uavcan::CastModeSaturate',
            t.CAST_MODE_TRUNCATED: '::uavcan::CastModeTruncate',
        }[t.cast_mode]
        if t.kind == t.KIND_FLOAT:
            return '::uavcan::FloatSpec< %d, %s >' % (t.bitlen, cast_mode)
        else:
            # Booleans are represented as 1-bit unsigned integers
            signedness = {
                t.KIND_BOOLEAN: '::uavcan::SignednessUnsigned',
                t.KIND_UNSIGNED_INT: '::uavcan::SignednessUnsigned',
                t.KIND_SIGNED_INT: '::uavcan::SignednessSigned',
            }[t.kind]
            return '::uavcan::IntegerSpec< %d, %s, %s >' % (t.bitlen, signedness, cast_mode)
    elif t.category == t.CATEGORY_ARRAY:
        value_type = type_to_cpp_type(t.value_type)  # Recurse for the element type
        mode = {
            t.MODE_STATIC: '::uavcan::ArrayModeStatic',
            t.MODE_DYNAMIC: '::uavcan::ArrayModeDynamic',
        }[t.mode]
        return '::uavcan::Array< %s, %s, %d >' % (value_type, mode, t.max_size)
    elif t.category == t.CATEGORY_COMPOUND:
        # Nested compound types are referenced by fully qualified C++ name
        return '::' + t.full_name.replace('.', '::')
    elif t.category == t.CATEGORY_VOID:
        # Void (padding) fields become unsigned integers of the same bit length
        return '::uavcan::IntegerSpec< %d, ::uavcan::SignednessUnsigned, ::uavcan::CastModeSaturate >' % t.bitlen
    else:
        raise DsdlCompilerException('Unknown type category: %s' % t.category)
def generate_one_type(template_expander, t):
    # Render one DSDL type into C++ header text.  The parsed type object `t`
    # is annotated in place with the extra attributes the template consumes
    # (cpp_type_name, cpp_includes, cpp_kind, ...), then the template is
    # expanded and the output whitespace is normalized.
    t.short_name = t.full_name.split('.')[-1]
    # Trailing underscore: the real name is produced by a template alias
    t.cpp_type_name = t.short_name + '_'
    t.cpp_full_type_name = '::' + t.full_name.replace('.', '::')
    t.include_guard = t.full_name.replace('.', '_').upper() + '_HPP_INCLUDED'

    # Dependencies (no duplicates)
    def fields_includes(fields):
        # Header paths for every compound type referenced by the fields
        # (looking through arrays); primitives yield None and are filtered out.
        def detect_include(t):
            if t.category == t.CATEGORY_COMPOUND:
                return type_output_filename(t)
            if t.category == t.CATEGORY_ARRAY:
                return detect_include(t.value_type)
        return list(sorted(set(filter(None, [detect_include(x.type) for x in fields]))))

    if t.kind == t.KIND_MESSAGE:
        t.cpp_includes = fields_includes(t.fields)
    else:
        t.cpp_includes = fields_includes(t.request_fields + t.response_fields)

    t.cpp_namespace_components = t.full_name.split('.')[:-1]
    t.has_default_dtid = t.default_dtid is not None

    # Attribute types
    def inject_cpp_types(attributes):
        # Attach the C++ spec string to each attribute and synthesize names
        # for void (padding) fields, which are unnamed in DSDL.
        void_index = 0
        for a in attributes:
            a.cpp_type = type_to_cpp_type(a.type)
            a.void = a.type.category == a.type.CATEGORY_VOID
            if a.void:
                assert not a.name
                a.name = '_void_%d' % void_index
                void_index += 1

    if t.kind == t.KIND_MESSAGE:
        inject_cpp_types(t.fields)
        inject_cpp_types(t.constants)
        t.all_attributes = t.fields + t.constants
        # A union with no fields degenerates to a non-union
        t.union = t.union and len(t.fields)
    else:
        inject_cpp_types(t.request_fields)
        inject_cpp_types(t.request_constants)
        inject_cpp_types(t.response_fields)
        inject_cpp_types(t.response_constants)
        t.all_attributes = t.request_fields + t.request_constants + t.response_fields + t.response_constants
        t.request_union = t.request_union and len(t.request_fields)
        t.response_union = t.response_union and len(t.response_fields)

    # Constant properties
    def inject_constant_info(constants):
        # Validate each constant's literal and derive its C++ spelling
        # (unsigned integer literals get a 'U' suffix).
        for c in constants:
            if c.type.kind == c.type.KIND_FLOAT:
                float(c.string_value)  # Making sure that this is a valid float literal
                c.cpp_value = c.string_value
            else:
                int(c.string_value)  # Making sure that this is a valid integer literal
                c.cpp_value = c.string_value
                if c.type.kind == c.type.KIND_UNSIGNED_INT:
                    c.cpp_value += 'U'

    if t.kind == t.KIND_MESSAGE:
        inject_constant_info(t.constants)
    else:
        inject_constant_info(t.request_constants)
        inject_constant_info(t.response_constants)

    # Data type kind
    t.cpp_kind = {
        t.KIND_MESSAGE: '::uavcan::DataTypeKindMessage',
        t.KIND_SERVICE: '::uavcan::DataTypeKindService',
    }[t.kind]

    # Generation
    text = template_expander(t=t)  # t for Type
    # Strip trailing whitespace and collapse runs of blank lines
    text = '\n'.join(x.rstrip() for x in text.splitlines())
    text = text.replace('\n\n\n\n\n', '\n\n').replace('\n\n\n\n', '\n\n').replace('\n\n\n', '\n\n')
    text = text.replace('{\n\n ', '{\n ')
    return text
def make_template_expander(filename):
    '''
    Templating is based on pyratemp (http://www.simple-is-better.org/template/pyratemp.html).
    The pyratemp's syntax is rather verbose and not so human friendly, so we define some
    custom extensions to make it easier to read and write.
    The resulting syntax somewhat resembles Mako (which was used earlier instead of pyratemp):
        Substitution:
            ${expression}
        Line joining through backslash (replaced with a single space):
            ${foo(bar(very_long_argument=42, \
                      second_line=72))}
        Blocks:
            % for a in range(10):
                % if a == 5:
                    ${foo()}
                % endif
            % endfor
    The extended syntax is converted into pyratemp's through regexp substitution.

    Returns a callable that expands the template with the given keyword
    arguments (plus the helpers `indent` and `enum_last_value` injected below).
    '''
    with open(filename) as f:
        template_text = f.read()

    # Backslash-newline elimination
    template_text = re.sub(r'\\\r{0,1}\n\ *', r' ', template_text)

    # Substitution syntax transformation: ${foo} ==> $!foo!$
    template_text = re.sub(r'([^\$]{0,1})\$\{([^\}]+)\}', r'\1$!\2!$', template_text)

    # Flow control expression transformation: % foo: ==> <!--(foo)-->
    template_text = re.sub(r'(?m)^(\ *)\%\ *(.+?):{0,1}$', r'\1<!--(\2)-->', template_text)

    # Block termination transformation: <!--(endfoo)--> ==> <!--(end)-->
    template_text = re.sub(r'\<\!--\(end[a-z]+\)--\>', r'<!--(end)-->', template_text)

    # Pyratemp workaround.
    # The problem is that if there's no empty line after a macro declaration, first line will be doubly indented.
    # Workaround:
    #  1. Remove trailing comments
    #  2. Add a newline after each macro declaration
    template_text = re.sub(r'\ *\#\!.*', '', template_text)
    template_text = re.sub(r'(\<\!--\(macro\ [a-zA-Z0-9_]+\)--\>.*?)', r'\1\n', template_text)

    # Preprocessed text output for debugging
    # with open(filename + '.d', 'w') as f:
    #     f.write(template_text)

    template = Template(template_text)

    def expand(**args):
        # This function adds one indentation level (4 spaces); it will be used from the template
        args['indent'] = lambda text, idnt = '    ': idnt + text.replace('\n', '\n' + idnt)

        # This function works like enumerate(), telling you whether the current item is the last one
        def enum_last_value(iterable, start=0):
            it = iter(iterable)
            count = start
            try:
                last = next(it)
            except StopIteration:
                return
            for val in it:
                yield count, False, last
                last = val
                count += 1
            yield count, True, last

        args['enum_last_value'] = enum_last_value
        return template(**args)

    return expand
@@ -0,0 +1,558 @@
/*
* UAVCAN data structure definition for libuavcan.
*
* Autogenerated, do not edit.
*
* Source file: ${t.source_file}
*/
#ifndef ${t.include_guard}
#define ${t.include_guard}
#include <uavcan/build_config.hpp>
#include <uavcan/node/global_data_type_registry.hpp>
#include <uavcan/marshal/types.hpp>
% for inc in t.cpp_includes:
#include <${inc}>
% endfor
/******************************* Source text **********************************
% for line in t.source_text.strip().splitlines():
${line}
% endfor
******************************************************************************/
/********************* DSDL signature source definition ***********************
% for line in t.get_dsdl_signature_source_definition().splitlines():
${line}
% endfor
******************************************************************************/
% for a in t.all_attributes:
#undef ${a.name}
% endfor
% for nsc in t.cpp_namespace_components:
namespace ${nsc}
{
% endfor
% if t.kind != t.KIND_SERVICE:
template <int _tmpl>
% endif
struct UAVCAN_EXPORT ${t.cpp_type_name}
{
<!--(macro generate_primary_body)--> #! type_name, max_bitlen, fields, constants, union
// Convenience typedefs used by the generated encode/decode and comparison methods.
typedef const ${type_name}<_tmpl>& ParameterType;
typedef ${type_name}<_tmpl>& ReferenceType;
<!--(macro expand_attr_types)--> #! group_name, attrs
struct ${group_name}
{
% for a in attrs:
typedef ${a.cpp_type} ${a.name};
% endfor
};
<!--(end)-->
${expand_attr_types(group_name='ConstantTypes', attrs=constants)}
${expand_attr_types(group_name='FieldTypes', attrs=fields)}
% if union:
// Union types carry a hidden tag; prefer the is()/as()/to() accessors below.
struct Tag
{
enum Type
{
% for idx,last,a in enum_last_value(fields):
${a.name}${',' if not last else ''}
% endfor
};
};
typedef ::uavcan::IntegerSpec< ::uavcan::IntegerBitLen< ${len(fields)}U - 1U >::Result,
::uavcan::SignednessUnsigned, ::uavcan::CastModeTruncate > TagType;
<!--(macro expand_enum_per_field)--> #! enum_name, enum_comparator
enum
{
${enum_name} = TagType::BitLen +
% for idx,last,a in enum_last_value(fields):
% if not last:
::uavcan::${enum_comparator}<FieldTypes::${a.name}::${enum_name},
% else:
FieldTypes::${a.name}::${enum_name} ${'>::Result' * (len(fields) - 1)}
% endif
% endfor
};
<!--(end)-->
${expand_enum_per_field(enum_name='MinBitLen', enum_comparator='EnumMin')}
${expand_enum_per_field(enum_name='MaxBitLen', enum_comparator='EnumMax')}
% else:
<!--(macro expand_enum_per_field)--> #! enum_name
enum
{
${enum_name}
% for idx,a in enumerate(fields):
${'=' if idx == 0 else '+'} FieldTypes::${a.name}::${enum_name}
% endfor
};
<!--(end)-->
${expand_enum_per_field(enum_name='MinBitLen')}
${expand_enum_per_field(enum_name='MaxBitLen')}
% endif
// Constants
% for a in constants:
static const typename ::uavcan::StorageType< typename ConstantTypes::${a.name} >::Type ${a.name}; // ${a.init_expression}
% endfor
// Fields
% for a in [x for x in fields if not x.void]:
typename ::uavcan::StorageType< typename FieldTypes::${a.name} >::Type ${a.name};
% endfor
% if union:
private:
typename ::uavcan::StorageType< TagType >::Type _tag_; // The name is mangled to avoid clashing with fields
template <typename Tag::Type T>
struct TagToType;
public:
% endif
// Default constructor: value-initializes every field (and the union tag, if present).
${type_name}()
% for idx,a in enumerate([x for x in fields if not x.void]):
${':' if idx == 0 else ','} ${a.name}()
% endfor
% if union:
, _tag_()
% endif
{
::uavcan::StaticAssert<_tmpl == 0>::check(); // Usage check
#if UAVCAN_DEBUG
/*
 * Cross-checking MaxBitLen provided by the DSDL compiler.
 * This check shall never be performed in user code because MaxBitLen value
 * actually depends on the nested types, thus it is not invariant.
 */
::uavcan::StaticAssert<${max_bitlen} == MaxBitLen>::check();
#endif
}
bool operator==(ParameterType rhs) const;
bool operator!=(ParameterType rhs) const { return !operator==(rhs); }
/**
 * This comparison is based on @ref uavcan::areClose(), which ensures proper comparison of
 * floating point fields at any depth.
 */
bool isClose(ParameterType rhs) const;
static int encode(ParameterType self, ::uavcan::ScalarCodec& codec,
::uavcan::TailArrayOptimizationMode tao_mode = ::uavcan::TailArrayOptEnabled);
static int decode(ReferenceType self, ::uavcan::ScalarCodec& codec,
::uavcan::TailArrayOptimizationMode tao_mode = ::uavcan::TailArrayOptEnabled);
% if union:
/**
 * Explicit access to the tag.
 * It is safer to use is()/as()/to() instead.
 */
typename Tag::Type getTag() const { return typename Tag::Type(_tag_); }
void setTag(typename Tag::Type x) { _tag_ = typename ::uavcan::StorageType< TagType >::Type(x); }
/**
 * Whether the union is set to the given type.
 * Access by tag; this will work even if there are non-unique types within the union.
 */
bool is(typename Tag::Type x) const { return typename Tag::Type(_tag_) == x; }
/**
 * If the union is currently set to the type T, returns pointer to the appropriate field.
 * If the union is set to another type, returns null pointer.
 */
template <typename Tag::Type T>
inline const typename TagToType<T>::StorageType* as() const;
/**
 * Switches the union to the given type and returns a mutable reference to the appropriate field.
 * If the previous type was different, a default constructor will be called first.
 */
template <typename Tag::Type T>
inline typename TagToType<T>::StorageType& to();
% endif
<!--(end)-->
% if t.kind == t.KIND_SERVICE:
// Service types expose separate Request_/Response_ structs; the dummy _tmpl
// template parameter keeps the static member definitions header-only.
template <int _tmpl>
struct Request_
{
${indent(generate_primary_body(type_name='Request_', max_bitlen=t.get_max_bitlen_request(), \
fields=t.request_fields, constants=t.request_constants, \
union=t.request_union))}
};
template <int _tmpl>
struct Response_
{
${indent(generate_primary_body(type_name='Response_', max_bitlen=t.get_max_bitlen_response(), \
fields=t.response_fields, constants=t.response_constants, \
union=t.response_union))}
};
typedef Request_<0> Request;
typedef Response_<0> Response;
% else:
${generate_primary_body(type_name=t.cpp_type_name, max_bitlen=t.get_max_bitlen(), \
fields=t.fields, constants=t.constants, union=t.union)}
% endif
/*
 * Static type info
 */
enum { DataTypeKind = ${t.cpp_kind} };
% if t.has_default_dtid:
enum { DefaultDataTypeID = ${t.default_dtid} };
% else:
// This type has no default data type ID
% endif
static const char* getDataTypeFullName()
{
return "${t.full_name}";
}
static void extendDataTypeSignature(::uavcan::DataTypeSignature& signature)
{
signature.extend(getDataTypeSignature());
}
static ::uavcan::DataTypeSignature getDataTypeSignature();
% if t.kind == t.KIND_SERVICE:
private:
${t.cpp_type_name}(); // Don't create objects of this type. Use Request/Response instead.
% endif
};
/*
 * Out of line struct method definitions
 */
<!--(macro define_out_of_line_struct_methods)--> #! scope_prefix, fields, union
template <int _tmpl>
bool ${scope_prefix}<_tmpl>::operator==(ParameterType rhs) const
{
% if union:
// Unions compare equal only when the tags match and the active fields compare equal.
if (_tag_ != rhs._tag_)
{
return false;
}
% for idx,a in enumerate(fields):
if (_tag_ == ${idx})
{
return ${a.name} == rhs.${a.name};
}
% endfor
UAVCAN_ASSERT(0); // Invalid tag
return false;
% else:
% if fields:
return
% for idx,last,a in enum_last_value([x for x in fields if not x.void]):
${a.name} == rhs.${a.name}${' &&' if not last else ';'}
% endfor
% else:
(void)rhs;
return true;
% endif
% endif
}
template <int _tmpl>
bool ${scope_prefix}<_tmpl>::isClose(ParameterType rhs) const
{
% if union:
if (_tag_ != rhs._tag_)
{
return false;
}
% for idx,a in enumerate(fields):
if (_tag_ == ${idx})
{
return ::uavcan::areClose(${a.name}, rhs.${a.name});
}
% endfor
UAVCAN_ASSERT(0); // Invalid tag
return false;
% else:
% if fields:
return
% for idx,last,a in enum_last_value([x for x in fields if not x.void]):
::uavcan::areClose(${a.name}, rhs.${a.name})${' &&' if not last else ';'}
% endfor
% else:
(void)rhs;
return true;
% endif
% endif
}
<!--(macro generate_codec_calls_per_field)--> #! call_name, self_parameter_type
template <int _tmpl>
int ${scope_prefix}<_tmpl>::${call_name}(${self_parameter_type} self, ::uavcan::ScalarCodec& codec,
::uavcan::TailArrayOptimizationMode tao_mode)
{
(void)self;
(void)codec;
(void)tao_mode;
% if union:
// For unions the tag is coded first, then only the active field follows.
const int res = TagType::${call_name}(self._tag_, codec, ::uavcan::TailArrayOptDisabled);
if (res <= 0)
{
return res;
}
% for idx,a in enumerate(fields):
if (self._tag_ == ${idx})
{
return FieldTypes::${a.name}::${call_name}(self.${a.name}, codec, tao_mode);
}
% endfor
return -1; // Invalid tag value
% else:
% for a in [x for x in fields if x.void]:
typename ::uavcan::StorageType< typename FieldTypes::${a.name} >::Type ${a.name} = 0;
% endfor
int res = 1;
% for idx,last,a in enum_last_value(fields):
res = FieldTypes::${a.name}::${call_name}(${'self.' * (not a.void)}${a.name}, codec, \
${'::uavcan::TailArrayOptDisabled' if not last else 'tao_mode'});
% if not last:
if (res <= 0)
{
return res;
}
% endif
% endfor
return res;
% endif
}
<!--(end)-->
${generate_codec_calls_per_field(call_name='encode', self_parameter_type='ParameterType')}
${generate_codec_calls_per_field(call_name='decode', self_parameter_type='ReferenceType')}
% if union:
// Map each union tag value onto its field type so as()/to() can be specialized per tag.
% for idx,a in enumerate(fields):
template <>
template <>
struct ${scope_prefix}<0>::TagToType<${scope_prefix}<0>::Tag::${a.name}>
{
typedef typename ${scope_prefix}<0>::FieldTypes::${a.name} Type;
typedef typename ::uavcan::StorageType<Type>::Type StorageType;
};
template <>
template <>
inline const typename ${scope_prefix}<0>::TagToType< ${scope_prefix}<0>::Tag::${a.name} >::StorageType*
${scope_prefix}<0>::as< ${scope_prefix}<0>::Tag::${a.name} >() const
{
return is(${scope_prefix}<0>::Tag::${a.name}) ? &${a.name} : UAVCAN_NULLPTR;
}
template <>
template <>
inline typename ${scope_prefix}<0>::TagToType< ${scope_prefix}<0>::Tag::${a.name} >::StorageType&
${scope_prefix}<0>::to< ${scope_prefix}<0>::Tag::${a.name} >()
{
if (_tag_ != ${idx})
{
_tag_ = ${idx};
${a.name} = typename TagToType< ${scope_prefix}<0>::Tag::${a.name} >::StorageType();
}
return ${a.name};
}
% endfor
% endif
<!--(end)-->
% if t.kind == t.KIND_SERVICE:
${define_out_of_line_struct_methods(scope_prefix=t.cpp_type_name + '::Request_', fields=t.request_fields, \
union=t.request_union)}
${define_out_of_line_struct_methods(scope_prefix=t.cpp_type_name + '::Response_', fields=t.response_fields, \
union=t.response_union)}
% else:
${define_out_of_line_struct_methods(scope_prefix=t.cpp_type_name, fields=t.fields, union=t.union)}
% endif
/*
 * Out of line type method definitions
 */
% if t.kind == t.KIND_SERVICE:
inline ::uavcan::DataTypeSignature ${t.cpp_type_name}::getDataTypeSignature()
% else:
template <int _tmpl>
::uavcan::DataTypeSignature ${t.cpp_type_name}<_tmpl>::getDataTypeSignature()
% endif
{
// Seed with the DSDL signature of this type alone; nested field types extend it below.
::uavcan::DataTypeSignature signature(${'0x%08X' % t.get_dsdl_signature()}ULL);
<!--(macro extend_signature_per_field)--> #! scope_prefix, fields
% for a in fields:
${scope_prefix}FieldTypes::${a.name}::extendDataTypeSignature(signature);
% endfor
<!--(end)-->
% if t.kind == t.KIND_SERVICE:
${extend_signature_per_field(scope_prefix='Request::', fields=t.request_fields)}
${extend_signature_per_field(scope_prefix='Response::', fields=t.response_fields)}
% else:
${extend_signature_per_field(scope_prefix='', fields=t.fields)}
% endif
return signature;
}
/*
 * Out of line constant definitions
 */
<!--(macro define_out_of_line_constants)--> #! scope_prefix, constants
% for a in constants:
template <int _tmpl>
const typename ::uavcan::StorageType< typename ${scope_prefix}<_tmpl>::ConstantTypes::${a.name} >::Type
${scope_prefix}<_tmpl>::${a.name} = ${a.cpp_value}; // ${a.init_expression}
% endfor
<!--(end)-->
% if t.kind == t.KIND_SERVICE:
${define_out_of_line_constants(scope_prefix=t.cpp_type_name + '::Request_', constants=t.request_constants)}
${define_out_of_line_constants(scope_prefix=t.cpp_type_name + '::Response_', constants=t.response_constants)}
% else:
${define_out_of_line_constants(scope_prefix=t.cpp_type_name, constants=t.constants)}
% endif
/*
 * Final typedef
 */
% if t.kind == t.KIND_SERVICE:
typedef ${t.cpp_type_name} ${t.short_name};
% else:
typedef ${t.cpp_type_name}<0> ${t.short_name};
% endif
% if t.has_default_dtid:
// A namespace-scope object registers this type with the global registry at static initialization time.
namespace
{
const ::uavcan::DefaultDataTypeRegistrator< ${t.cpp_full_type_name} > _uavcan_gdtr_registrator_${t.short_name};
}
% else:
// No default registration
% endif
% for nsc in t.cpp_namespace_components[::-1]:
} // Namespace ${nsc}
% endfor
/*
 * YAML streamer specialization
 */
namespace uavcan
{
<!--(macro define_yaml_streamer)--> #! type_name, fields, union
// Streams the object as indented YAML; 'level' controls the nesting depth.
template <>
class UAVCAN_EXPORT YamlStreamer< ${type_name} >
{
public:
template <typename Stream>
static void stream(Stream& s, ${type_name}::ParameterType obj, const int level);
};
template <typename Stream>
void YamlStreamer< ${type_name} >::stream(Stream& s, ${type_name}::ParameterType obj, const int level)
{
(void)s;
(void)obj;
(void)level;
% if union:
if (level > 0)
{
s << '\n';
for (int pos = 0; pos < level; pos++)
{
s << " ";
}
}
% for idx,a in enumerate(fields):
if (static_cast<int>(obj.getTag()) == ${idx})
{
s << "${a.name}: ";
YamlStreamer< ${type_name}::FieldTypes::${a.name} >::stream(s, obj.${a.name}, level + 1);
}
% endfor
% else:
% for idx,a in enumerate([x for x in fields if not x.void]):
% if idx == 0:
if (level > 0)
{
s << '\n';
for (int pos = 0; pos < level; pos++)
{
s << " ";
}
}
% else:
s << '\n';
for (int pos = 0; pos < level; pos++)
{
s << " ";
}
% endif
s << "${a.name}: ";
YamlStreamer< ${type_name}::FieldTypes::${a.name} >::stream(s, obj.${a.name}, level + 1);
% endfor
% endif
}
<!--(end)-->
% if t.kind == t.KIND_SERVICE:
${define_yaml_streamer(type_name=t.cpp_full_type_name + '::Request', fields=t.request_fields, union=t.request_union)}
${define_yaml_streamer(type_name=t.cpp_full_type_name + '::Response', fields=t.response_fields, union=t.response_union)}
% else:
${define_yaml_streamer(type_name=t.cpp_full_type_name, fields=t.fields, union=t.union)}
% endif
}
% for nsc in t.cpp_namespace_components:
namespace ${nsc}
{
% endfor
<!--(macro define_streaming_operator)--> #! type_name
// Convenience operator<< that streams the object as YAML starting at nesting level 0.
template <typename Stream>
inline Stream& operator<<(Stream& s, ${type_name}::ParameterType obj)
{
::uavcan::YamlStreamer< ${type_name} >::stream(s, obj, 0);
return s;
}
<!--(end)-->
% if t.kind == t.KIND_SERVICE:
${define_streaming_operator(type_name=t.cpp_full_type_name + '::Request')}
${define_streaming_operator(type_name=t.cpp_full_type_name + '::Response')}
% else:
${define_streaming_operator(type_name=t.cpp_full_type_name)}
% endif
% for nsc in t.cpp_namespace_components[::-1]:
} // Namespace ${nsc}
% endfor
#endif // ${t.include_guard}
File diff suppressed because it is too large Load Diff
@@ -0,0 +1,64 @@
#!/usr/bin/env python
#
# UAVCAN DSDL compiler for libuavcan
# Supported Python versions: 3.2+, 2.7.
#
# Copyright (C) 2014 Pavel Kirienko <pavel.kirienko@gmail.com>
#
from __future__ import division, absolute_import, print_function, unicode_literals
import os, sys, logging, argparse
# This trickery allows to use the compiler even if pyuavcan is not installed in the system.
# This is extremely important, as it makes the compiler (and therefore libuavcan in general)
# totally dependency-free, except for the Python interpreter itself.
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
LOCAL_PYUAVCAN_DIR = os.path.join(SCRIPT_DIR, 'pydronecan')
RUNNING_FROM_SRC_DIR = os.path.isdir(LOCAL_PYUAVCAN_DIR)
if RUNNING_FROM_SRC_DIR:
    #print('Running from the source directory')
    # Prepend so the bundled copies shadow any system-wide installation.
    sys.path.insert(0, SCRIPT_DIR)
    sys.path.insert(0, LOCAL_PYUAVCAN_DIR)
def configure_logging(verbosity):
fmt = '%(message)s'
level = { 0: logging.WARNING, 1: logging.INFO, 2: logging.DEBUG }.get(verbosity or 0, logging.DEBUG)
logging.basicConfig(stream=sys.stderr, level=level, format=fmt)
def die(text):
    """Print an error message to stderr and terminate the process with status 1.

    Uses sys.exit() instead of the site-provided exit() builtin: the latter is
    installed by the 'site' module and is not guaranteed to exist (e.g. when
    running under ``python -S`` or in frozen/embedded interpreters).
    """
    print(text, file=sys.stderr)
    sys.exit(1)
# Default output directory used when -O/--outdir is not given.
DEFAULT_OUTDIR = 'dsdlc_generated'
DESCRIPTION = '''UAVCAN DSDL compiler for libuavcan.
Takes an input directory that contains an hierarchy of DSDL
definitions and converts it into compatible hierarchy of C++ types for libuavcan.
This script can be used directly from the source directory, no installation required!
Supported Python versions: 3.2+, 2.7.
'''
argparser = argparse.ArgumentParser(description=DESCRIPTION)
argparser.add_argument('source_dir', nargs='+', help='source directory with DSDL definitions')
argparser.add_argument('--verbose', '-v', action='count', help='verbosity level (-v, -vv)')
argparser.add_argument('--outdir', '-O', default=DEFAULT_OUTDIR, help='output directory, default %s' % DEFAULT_OUTDIR)
argparser.add_argument('--incdir', '-I', default=[], action='append', help=
    '''nested type namespaces, one path per argument. Can be also specified through the environment variable
UAVCAN_DSDL_INCLUDE_PATH, where the path entries are separated by colons ":"''')
args = argparser.parse_args()
configure_logging(args.verbose)
# Merge include directories passed via the environment with those given by -I.
try:
    extra_incdir = os.environ['UAVCAN_DSDL_INCLUDE_PATH'].split(':')
    logging.info('Additional include directories: %s', extra_incdir)
    args.incdir += extra_incdir
except KeyError:
    pass  # Environment variable not set; nothing to merge.
# Imported late so that the sys.path adjustments above take effect first.
from libuavcan_dsdl_compiler import run as dsdlc_run
try:
    dsdlc_run(args.source_dir, args.incdir, args.outdir)
except Exception as ex:
    logging.error('Compiler failure', exc_info=True)
    die(str(ex))
@@ -0,0 +1,276 @@
/*
 * Copyright (C) 2014 Pavel Kirienko <pavel.kirienko@gmail.com>
 */
#ifndef UAVCAN_BUILD_CONFIG_HPP_INCLUDED
#define UAVCAN_BUILD_CONFIG_HPP_INCLUDED
/**
 * UAVCAN version definition
 */
#define UAVCAN_VERSION_MAJOR 1
#define UAVCAN_VERSION_MINOR 0
/**
 * UAVCAN_CPP_VERSION - version of the C++ standard used during compilation.
 * This definition contains the integer year number after which the standard was named:
 *  - 2003 for C++03
 *  - 2011 for C++11
 *
 * This config automatically sets according to the actual C++ standard used by the compiler.
 *
 * In C++03 mode the library has almost zero dependency on the C++ standard library, which allows
 * to use it on platforms with a very limited C++ support. On the other hand, C++11 mode requires
 * many parts of the standard library (e.g. <functional>), thus the user might want to force older
 * standard than used by the compiler, in which case this symbol can be overridden manually via
 * compiler flags.
 */
#define UAVCAN_CPP11    2011
#define UAVCAN_CPP03    2003
#ifndef UAVCAN_CPP_VERSION
# if __cplusplus > 201200
#  error Unsupported C++ standard. You can explicitly set UAVCAN_CPP_VERSION=UAVCAN_CPP11 to silence this error.
# elif (__cplusplus > 201100) || defined(__GXX_EXPERIMENTAL_CXX0X__)
#  define UAVCAN_CPP_VERSION    UAVCAN_CPP11
# else
#  define UAVCAN_CPP_VERSION    UAVCAN_CPP03
# endif
#endif
/**
 * The library uses UAVCAN_NULLPTR instead of NULL and nullptr in order to allow the use of
 * -Wzero-as-null-pointer-constant.
 */
#ifndef UAVCAN_NULLPTR
# if UAVCAN_CPP_VERSION >= UAVCAN_CPP11
#  define UAVCAN_NULLPTR nullptr
# else
#  define UAVCAN_NULLPTR NULL
# endif
#endif
/**
 * By default, libuavcan enables all features if it detects that it is being built for a general-purpose
 * target like Linux. Value of this macro influences other configuration options located below in this file.
 * This macro can be overridden if needed.
 */
#ifndef UAVCAN_GENERAL_PURPOSE_PLATFORM
# if (defined(__linux__)    || defined(__linux)     || defined(__APPLE__)   ||\
      defined(_WIN64)       || defined(_WIN32)      || defined(__ANDROID__) ||\
      defined(_SYSTYPE_BSD) || defined(__FreeBSD__))
#  define UAVCAN_GENERAL_PURPOSE_PLATFORM 1
# else
#  define UAVCAN_GENERAL_PURPOSE_PLATFORM 0
# endif
#endif
/**
 * This macro enables built-in runtime checks and debug output via printf().
 * Should be used only for library development.
 */
#ifndef UAVCAN_DEBUG
# define UAVCAN_DEBUG 0
#endif
/**
 * This option allows to select whether libuavcan should throw exceptions on fatal errors, or try to handle
 * errors differently. By default, exceptions will be enabled only if the library is built for a general-purpose
 * OS like Linux. Set UAVCAN_EXCEPTIONS explicitly to override.
 */
#ifndef UAVCAN_EXCEPTIONS
# define UAVCAN_EXCEPTIONS UAVCAN_GENERAL_PURPOSE_PLATFORM
#endif
/**
 * This specification is used by some error reporting functions like in the Logger class.
 * The default can be overridden by defining the macro UAVCAN_NOEXCEPT explicitly, e.g. via compiler options.
 */
#ifndef UAVCAN_NOEXCEPT
# if UAVCAN_EXCEPTIONS
#  if UAVCAN_CPP_VERSION >= UAVCAN_CPP11
#   define UAVCAN_NOEXCEPT noexcept
#  else
#   define UAVCAN_NOEXCEPT throw()
#  endif
# else
#  define UAVCAN_NOEXCEPT
# endif
#endif
/**
 * Declaration visibility
 * http://gcc.gnu.org/wiki/Visibility
 */
#ifndef UAVCAN_EXPORT
# define UAVCAN_EXPORT
#endif
/**
 * Trade-off between ROM/RAM usage and functionality/determinism.
 * Note that this feature is not well tested and should be avoided.
 * Use code search for UAVCAN_TINY to find what functionality will be disabled.
 * This is particularly useful for embedded systems with less than 40kB of ROM.
 */
#ifndef UAVCAN_TINY
# define UAVCAN_TINY 0
#endif
/**
 * Disable the global data type registry, which can save some space on embedded systems.
 */
#ifndef UAVCAN_NO_GLOBAL_DATA_TYPE_REGISTRY
# define UAVCAN_NO_GLOBAL_DATA_TYPE_REGISTRY 0
#endif
/**
 * toString() methods will be disabled by default, unless the library is built for a general-purpose target like Linux.
 * It is not recommended to enable toString() on embedded targets as code size will explode.
 */
#ifndef UAVCAN_TOSTRING
# if UAVCAN_EXCEPTIONS
#  define UAVCAN_TOSTRING UAVCAN_GENERAL_PURPOSE_PLATFORM
# else
#  define UAVCAN_TOSTRING 0
# endif
#endif
#if UAVCAN_TOSTRING
# if !UAVCAN_EXCEPTIONS
#  error UAVCAN_TOSTRING requires UAVCAN_EXCEPTIONS
# endif
# include <string>
#endif
/**
 * Some C++ implementations are half-broken because they don't implement the placement new operator.
 * If UAVCAN_IMPLEMENT_PLACEMENT_NEW is defined, libuavcan will implement its own operator new (std::size_t, void*)
 * and its delete() counterpart, instead of relying on the standard header <new>.
 */
#ifndef UAVCAN_IMPLEMENT_PLACEMENT_NEW
# define UAVCAN_IMPLEMENT_PLACEMENT_NEW 0
#endif
/**
 * Allows the user's application to provide custom implementation of uavcan::snprintf(),
 * which is often useful on deeply embedded systems.
 */
#ifndef UAVCAN_USE_EXTERNAL_SNPRINTF
# define UAVCAN_USE_EXTERNAL_SNPRINTF   0
#endif
/**
 * Allows the user's application to provide a custom implementation of IEEE754Converter::nativeIeeeToHalf and
 * IEEE754Converter::halfToNativeIeee.
 */
#ifndef UAVCAN_USE_EXTERNAL_FLOAT16_CONVERSION
# define UAVCAN_USE_EXTERNAL_FLOAT16_CONVERSION 0
#endif
/**
 * Run time checks.
 * Resolves to the standard assert() by default.
 * Disabled completely if UAVCAN_NO_ASSERTIONS is defined.
 */
#ifndef UAVCAN_ASSERT
# ifndef UAVCAN_NO_ASSERTIONS
#  define UAVCAN_NO_ASSERTIONS 0
# endif
# if UAVCAN_NO_ASSERTIONS
#  define UAVCAN_ASSERT(x)
# else
#  define UAVCAN_ASSERT(x) assert(x)
# endif
#endif
/*
 * Branch prediction hints; compile to plain expressions when __builtin_expect is unavailable.
 */
#ifndef UAVCAN_LIKELY
# if __GNUC__
#  define UAVCAN_LIKELY(x) __builtin_expect(!!(x), true)
# else
#  define UAVCAN_LIKELY(x) (x)
# endif
#endif
#ifndef UAVCAN_UNLIKELY
# if __GNUC__
#  define UAVCAN_UNLIKELY(x) __builtin_expect(!!(x), false)
# else
#  define UAVCAN_UNLIKELY(x) (x)
# endif
#endif
namespace uavcan
{
/**
 * Memory pool block size.
 *
 * The default of 64 bytes should be OK for any target arch up to AMD64 and any compiler.
 *
 * The library leverages compile-time checks to ensure that all types that are subject to dynamic allocation
 * fit this size, otherwise compilation fails.
 *
 * For platforms featuring small pointer width (16..32 bits), UAVCAN_MEM_POOL_BLOCK_SIZE can often be safely
 * reduced to 56 or even 48 bytes, which leads to lower memory footprint.
 *
 * Note that the pool block size shall be aligned at biggest alignment of the target platform (detected and
 * checked automatically at compile time).
 */
#ifdef UAVCAN_MEM_POOL_BLOCK_SIZE
/// Explicitly specified by the user.
static const unsigned MemPoolBlockSize = UAVCAN_MEM_POOL_BLOCK_SIZE;
#elif defined(__BIGGEST_ALIGNMENT__) && (__BIGGEST_ALIGNMENT__ <= 8)
/// Convenient default for GCC-like compilers - if alignment allows, pool block size can be safely reduced.
static const unsigned MemPoolBlockSize = 56;
#else
/// Safe default that should be OK for any platform.
static const unsigned MemPoolBlockSize = 64;
#endif
#ifdef __BIGGEST_ALIGNMENT__
static const unsigned MemPoolAlignment = __BIGGEST_ALIGNMENT__;
#else
static const unsigned MemPoolAlignment = 16;
#endif
// Compile-time check: the array size is -1 (ill-formed) unless the block size is a multiple of the alignment.
typedef char _alignment_check_for_MEM_POOL_BLOCK_SIZE[((MemPoolBlockSize & (MemPoolAlignment - 1)) == 0) ? 1 : -1];
/**
 * This class allows to check at compile time whether type T can be allocated using the memory pool.
 * If the check fails, compilation fails.
 */
template <typename T>
struct UAVCAN_EXPORT IsDynamicallyAllocatable
{
    static void check()
    {
        // Negative array size makes this ill-formed when sizeof(T) exceeds the pool block size.
        char dummy[(sizeof(T) <= MemPoolBlockSize) ? 1 : -1] = { '0' };
        (void)dummy;
    }
};
/**
 * Float comparison precision.
 * For details refer to:
 *  http://randomascii.wordpress.com/2012/02/25/comparing-floating-point-numbers-2012-edition/
 *  https://code.google.com/p/googletest/source/browse/trunk/include/gtest/internal/gtest-internal.h
 */
#ifdef UAVCAN_FLOAT_COMPARISON_EPSILON_MULT
static const unsigned FloatComparisonEpsilonMult = UAVCAN_FLOAT_COMPARISON_EPSILON_MULT;
#else
static const unsigned FloatComparisonEpsilonMult = 10;
#endif
/**
 * Maximum number of CAN acceptance filters available on the platform
 */
#ifdef UAVCAN_MAX_CAN_ACCEPTANCE_FILTERS
/// Explicitly specified by the user.
static const unsigned MaxCanAcceptanceFilters = UAVCAN_MAX_CAN_ACCEPTANCE_FILTERS;
#else
/// Default that should be OK for any platform.
static const unsigned MaxCanAcceptanceFilters = 32;
#endif
}
#endif // UAVCAN_BUILD_CONFIG_HPP_INCLUDED
@@ -0,0 +1,176 @@
/*
 * Copyright (C) 2014 Pavel Kirienko <pavel.kirienko@gmail.com>
 */
#ifndef UAVCAN_DATA_TYPE_HPP_INCLUDED
#define UAVCAN_DATA_TYPE_HPP_INCLUDED
#include <cassert>
#include <cstring>
#include <uavcan/std.hpp>
#include <uavcan/build_config.hpp>
#include <uavcan/transport/transfer.hpp>
namespace uavcan
{
class UAVCAN_EXPORT TransferCRC;
/// The two categories of DSDL data types: services (request/response) and broadcast messages.
enum DataTypeKind
{
    DataTypeKindService,
    DataTypeKindMessage
};
// Number of enumerators in DataTypeKind above; keep in sync when extending the enum.
static const uint8_t NumDataTypeKinds = 2;
/**
 * Maps a transport-level transfer type onto the kind of data type it carries.
 * Service requests and responses both map to DataTypeKindService; broadcasts
 * map to DataTypeKindMessage. Any other value asserts in debug builds and
 * falls back to DataTypeKind(0) in release builds.
 */
static inline DataTypeKind getDataTypeKindForTransferType(const TransferType tt)
{
    switch (tt)
    {
    case TransferTypeServiceRequest:
    case TransferTypeServiceResponse:
    {
        return DataTypeKindService;
    }
    case TransferTypeMessageBroadcast:
    {
        return DataTypeKindMessage;
    }
    default:
    {
        UAVCAN_ASSERT(0);
        return DataTypeKind(0);
    }
    }
}
/**
 * Numeric identifier of a data type.
 * The valid ranges differ per kind (see the Max*DataTypeIDValue constants below);
 * a default-constructed instance holds 0xFFFFFFFF, which is above every valid
 * range and therefore never matches a real ID.
 */
class UAVCAN_EXPORT DataTypeID
{
    uint32_t value_;    // Wider than uint16_t so the out-of-range sentinel can be stored

public:
    static const uint16_t MaxServiceDataTypeIDValue = 255;
    static const uint16_t MaxMessageDataTypeIDValue = 65535;
    static const uint16_t MaxPossibleDataTypeIDValue = MaxMessageDataTypeIDValue;

    /// Constructs an out-of-range (invalid) ID.
    DataTypeID() : value_(0xFFFFFFFFUL) { }

    /// Implicit on purpose, so plain integers can be passed where a DataTypeID is expected.
    DataTypeID(uint16_t id)  // Implicit
        : value_(id)
    { }

    /// Highest valid ID value for the given data type kind (defined out of line).
    static DataTypeID getMaxValueForDataTypeKind(const DataTypeKind dtkind);

    /// True if this ID is within the valid range for the given kind.
    bool isValidForDataTypeKind(DataTypeKind dtkind) const
    {
        return value_ <= getMaxValueForDataTypeKind(dtkind).get();
    }

    // Truncates to 16 bits; an invalid (sentinel) ID therefore reads back as 0xFFFF.
    uint16_t get() const { return static_cast<uint16_t>(value_); }

    bool operator==(DataTypeID rhs) const { return value_ == rhs.value_; }
    bool operator!=(DataTypeID rhs) const { return value_ != rhs.value_; }

    bool operator<(DataTypeID rhs)  const { return value_ < rhs.value_; }
    bool operator>(DataTypeID rhs)  const { return value_ > rhs.value_; }
    bool operator<=(DataTypeID rhs) const { return value_ <= rhs.value_; }
    bool operator>=(DataTypeID rhs) const { return value_ >= rhs.value_; }
};
/**
 * CRC-64-WE
 * Description: http://reveng.sourceforge.net/crc-catalogue/17plus.htm#crc.cat-bits.64
 * Initial value: 0xFFFFFFFFFFFFFFFF
 * Poly: 0x42F0E1EBA9EA3693
 * Reverse: no
 * Output xor: 0xFFFFFFFFFFFFFFFF
 * Check: 0x62EC59E3F1A4F00A
 */
class UAVCAN_EXPORT DataTypeSignatureCRC
{
    uint64_t crc_;

public:
    /// Constructs a CRC object whose state continues from the given finalized CRC value.
    static DataTypeSignatureCRC extend(uint64_t crc);

    DataTypeSignatureCRC() : crc_(0xFFFFFFFFFFFFFFFFULL) { }

    void add(uint8_t byte);
    void add(const uint8_t* bytes, unsigned len);

    // Applies the output XOR to produce the finalized CRC value.
    uint64_t get() const { return crc_ ^ 0xFFFFFFFFFFFFFFFFULL; }
};

/**
 * 64-bit DSDL data type signature. Nested types are folded in via extend()
 * (see mixin64(), defined out of line).
 */
class UAVCAN_EXPORT DataTypeSignature
{
    uint64_t value_;

    void mixin64(uint64_t x);

public:
    DataTypeSignature() : value_(0) { }
    explicit DataTypeSignature(uint64_t value) : value_(value) { }

    /// Folds the signature of a nested data type into this one.
    void extend(DataTypeSignature dts);

    TransferCRC toTransferCRC() const;

    uint64_t get() const { return value_; }

    bool operator==(DataTypeSignature rhs) const { return value_ == rhs.value_; }
    bool operator!=(DataTypeSignature rhs) const { return !operator==(rhs); }
};
/**
 * This class contains complete description of a data type.
 */
class UAVCAN_EXPORT DataTypeDescriptor
{
    DataTypeSignature signature_;
    const char* full_name_;     // Not owned; the pointer must outlive this descriptor
    DataTypeKind kind_;
    DataTypeID id_;

public:
    static const unsigned MaxFullNameLen = 80;

    /// Constructs an empty descriptor (empty name, default-invalid ID).
    DataTypeDescriptor() :
        full_name_(""),
        kind_(DataTypeKind(0))
    { }

    // Note: the name pointer is stored as-is (no copy); it must remain valid.
    DataTypeDescriptor(DataTypeKind kind, DataTypeID id, const DataTypeSignature& signature, const char* name) :
        signature_(signature),
        full_name_(name),
        kind_(kind),
        id_(id)
    {
        UAVCAN_ASSERT((kind == DataTypeKindMessage) || (kind == DataTypeKindService));
        UAVCAN_ASSERT(name);
        UAVCAN_ASSERT(std::strlen(name) <= MaxFullNameLen);
    }

    bool isValid() const;

    DataTypeKind getKind() const { return kind_; }
    DataTypeID getID() const { return id_; }
    const DataTypeSignature& getSignature() const { return signature_; }
    const char* getFullName() const { return full_name_; }

    bool match(DataTypeKind kind, const char* name) const;
    bool match(DataTypeKind kind, DataTypeID id) const;

    bool operator!=(const DataTypeDescriptor& rhs) const { return !operator==(rhs); }
    bool operator==(const DataTypeDescriptor& rhs) const;

#if UAVCAN_TOSTRING
    std::string toString() const;
#endif
};

}

#endif // UAVCAN_DATA_TYPE_HPP_INCLUDED
@@ -0,0 +1,35 @@
/*
 * Debug stuff, should only be used for library development.
 * Copyright (C) 2014 Pavel Kirienko <pavel.kirienko@gmail.com>
 */
#ifndef UAVCAN_DEBUG_HPP_INCLUDED
#define UAVCAN_DEBUG_HPP_INCLUDED
#include <uavcan/build_config.hpp>
#if UAVCAN_DEBUG
# include <cstdio>
# include <cstdarg>
/**
 * printf-style trace output: prints "UAVCAN: <src>: <formatted message>" plus a newline
 * to stdout. Compiled in only when UAVCAN_DEBUG is enabled; otherwise the macro below
 * expands to a no-op.
 */
# if __GNUC__
__attribute__ ((format(printf, 2, 3)))
# endif
static void UAVCAN_TRACE(const char* src, const char* fmt, ...)
{
    va_list args;
    (void)std::printf("UAVCAN: %s: ", src);
    va_start(args, fmt);
    (void)std::vprintf(fmt, args);
    va_end(args);
    (void)std::puts("");
}
#else
# define UAVCAN_TRACE(...) ((void)0)
#endif
#endif // UAVCAN_DEBUG_HPP_INCLUDED
@@ -0,0 +1,249 @@
/*
 * CAN bus driver interface.
 * Copyright (C) 2014 Pavel Kirienko <pavel.kirienko@gmail.com>
 */
#ifndef UAVCAN_DRIVER_CAN_HPP_INCLUDED
#define UAVCAN_DRIVER_CAN_HPP_INCLUDED
#include <cassert>
#include <uavcan/std.hpp>
#include <uavcan/build_config.hpp>
#include <uavcan/driver/system_clock.hpp>
namespace uavcan
{
/**
 * This limit is defined by the specification.
 */
enum { MaxCanIfaces = 3 };
/**
 * Raw CAN frame, as passed to/from the CAN driver.
 * The top three bits of @ref id carry the EFF/RTR/ERR flags; the remaining bits
 * hold the 11-bit standard or 29-bit extended identifier (see the masks below).
 */
struct UAVCAN_EXPORT CanFrame
{
    static const uint32_t MaskStdID = 0x000007FFU;
    static const uint32_t MaskExtID = 0x1FFFFFFFU;
    static const uint32_t FlagEFF = 1U << 31;                  ///< Extended frame format
    static const uint32_t FlagRTR = 1U << 30;                  ///< Remote transmission request
    static const uint32_t FlagERR = 1U << 29;                  ///< Error frame

    static const uint8_t MaxDataLen = 8;

    uint32_t id;                ///< CAN ID with flags (above)
    uint8_t data[MaxDataLen];
    uint8_t dlc;                ///< Data Length Code

    /// Default constructor: ID 0, DLC 0, payload bytes zeroed.
    CanFrame() :
        id(0),
        dlc(0)
    {
        fill(data, data + MaxDataLen, uint8_t(0));
    }

    // Copies up to MaxDataLen payload bytes; a longer data_len is truncated
    // (and caught by the assertion in debug builds).
    CanFrame(uint32_t can_id, const uint8_t* can_data, uint8_t data_len) :
        id(can_id),
        dlc((data_len > MaxDataLen) ? MaxDataLen : data_len)
    {
        UAVCAN_ASSERT(can_data != UAVCAN_NULLPTR);
        UAVCAN_ASSERT(data_len == dlc);
        (void)copy(can_data, can_data + dlc, this->data);
    }

    bool operator!=(const CanFrame& rhs) const { return !operator==(rhs); }
    // Frames are equal when the ID (including flags), the DLC and the first dlc payload bytes match.
    bool operator==(const CanFrame& rhs) const
    {
        return (id == rhs.id) && (dlc == rhs.dlc) && equal(data, data + dlc, rhs.data);
    }

    bool isExtended()                  const { return id & FlagEFF; }
    bool isRemoteTransmissionRequest() const { return id & FlagRTR; }
    bool isErrorFrame()                const { return id & FlagERR; }

#if UAVCAN_TOSTRING
    enum StringRepresentation
    {
        StrTight,   ///< Minimum string length (default)
        StrAligned  ///< Fixed formatting for any frame
    };

    std::string toString(StringRepresentation mode = StrTight) const;
#endif

    /**
     * CAN frame arbitration rules, particularly STD vs EXT:
     *   Marco Di Natale - "Understanding and using the Controller Area Network"
     *   http://www6.in.tum.de/pub/Main/TeachingWs2013MSE/CANbus.pdf
     */
    bool priorityHigherThan(const CanFrame& rhs) const;
    bool priorityLowerThan(const CanFrame& rhs) const { return rhs.priorityHigherThan(*this); }
};
/**
 * Hardware acceptance filter configuration.
 * The flag bits from @ref CanFrame (EFF/RTR/ERR) may appear in both the ID and
 * the mask to match on frame type. Consumed by @ref ICanIface::configureFilters().
 */
struct UAVCAN_EXPORT CanFilterConfig
{
    uint32_t id;
    uint32_t mask;

    bool operator==(const CanFilterConfig& rhs) const
    {
        return (id == rhs.id) && (mask == rhs.mask);
    }

    /// Default configuration: zero ID and zero mask.
    CanFilterConfig()
        : id(0)
        , mask(0)
    { }
};
/**
 * Per-interface event masks for @ref ICanDriver::select().
 * Bit N corresponds to interface index N, e.g. read = 1 << 2 waits for read on the third iface.
 */
struct UAVCAN_EXPORT CanSelectMasks
{
    uint8_t read;
    uint8_t write;

    /// All events cleared by default.
    CanSelectMasks()
        : read(0)
        , write(0)
    { }
};
/**
 * Special IO flags. The constants are distinct bits and may be combined with bitwise OR.
 *
 * @ref CanIOFlagLoopback     - Send the frame back to RX with true TX timestamps.
 *
 * @ref CanIOFlagAbortOnError - Abort transmission on first bus error instead of retransmitting. This does not
 *                              affect the case of arbitration loss, in which case the retransmission will work
 *                              as usual. This flag is used together with anonymous messages which allows to
 *                              implement CSMA bus access. Read the spec for details.
 */
typedef uint16_t CanIOFlags;
static const CanIOFlags CanIOFlagLoopback = 1;
static const CanIOFlags CanIOFlagAbortOnError = 2;
/**
 * Single non-blocking CAN interface.
 * Both send() and receive() return immediately; waiting for IO readiness is
 * done via @ref ICanDriver::select().
 */
class UAVCAN_EXPORT ICanIface
{
public:
    virtual ~ICanIface() { }

    /**
     * Non-blocking transmission.
     *
     * If the frame wasn't transmitted upon TX deadline, the driver should discard it.
     *
     * Note that it is LIKELY that the library will want to send the frames that were passed into the select()
     * method as the next ones to transmit, but it is NOT guaranteed. The library can replace those with new
     * frames between the calls.
     *
     * @return 1 = one frame transmitted, 0 = TX buffer full, negative for error.
     */
    virtual int16_t send(const CanFrame& frame, MonotonicTime tx_deadline, CanIOFlags flags) = 0;

    /**
     * Non-blocking reception.
     *
     * Timestamps should be provided by the CAN driver, ideally by the hardware CAN controller.
     *
     * Monotonic timestamp is required and can be not precise since it is needed only for
     * protocol timing validation (transfer timeouts and inter-transfer intervals).
     *
     * UTC timestamp is optional, if available it will be used for precise time synchronization;
     * must be set to zero if not available.
     *
     * Refer to @ref ISystemClock to learn more about timestamps.
     *
     * @param [out] out_ts_monotonic Monotonic timestamp, mandatory.
     * @param [out] out_ts_utc       UTC timestamp, optional, zero if unknown.
     * @return 1 = one frame received, 0 = RX buffer empty, negative for error.
     */
    virtual int16_t receive(CanFrame& out_frame, MonotonicTime& out_ts_monotonic, UtcTime& out_ts_utc,
                            CanIOFlags& out_flags) = 0;

    /**
     * Configure the hardware CAN filters. @ref CanFilterConfig.
     *
     * @return 0 = success, negative for error.
     */
    virtual int16_t configureFilters(const CanFilterConfig* filter_configs, uint16_t num_configs) = 0;

    /**
     * Number of available hardware filters.
     */
    virtual uint16_t getNumFilters() const = 0;

    /**
     * Continuously incrementing counter of hardware errors.
     * Arbitration lost should not be treated as a hardware error.
     */
    virtual uint64_t getErrorCount() const = 0;
};
/**
 * Generic CAN driver.
 * Owns up to @ref MaxCanIfaces interfaces and multiplexes IO readiness via select().
 */
class UAVCAN_EXPORT ICanDriver
{
public:
    virtual ~ICanDriver() { }

    /**
     * Returns an interface by index, or null pointer if the index is out of range.
     */
    virtual ICanIface* getIface(uint8_t iface_index) = 0;

    /**
     * Default implementation of this method calls the non-const overload of getIface().
     * Can be overridden by the application if necessary.
     */
    virtual const ICanIface* getIface(uint8_t iface_index) const
    {
        return const_cast<ICanDriver*>(this)->getIface(iface_index);
    }

    /**
     * Total number of available CAN interfaces.
     * This value shall not change after initialization.
     */
    virtual uint8_t getNumIfaces() const = 0;

    /**
     * Block until the deadline, or one of the specified interfaces becomes available for read or write.
     *
     * Iface masks will be modified by the driver to indicate which exactly interfaces are available for IO.
     *
     * Bit position in the masks defines interface index.
     *
     * Note that it is allowed to return from this method even if no requested events actually happened, or if
     * there are events that were not requested by the library.
     *
     * The pending TX argument contains an array of pointers to CAN frames that the library wants to transmit
     * next, per interface. This is intended to allow the driver to properly prioritize transmissions; many
     * drivers will not need to use it. If a write flag for the given interface is set to one in the select mask
     * structure, then the corresponding pointer is guaranteed to be valid (not UAVCAN_NULLPTR).
     *
     * @param [in,out] inout_masks        Masks indicating which interfaces are needed/available for IO.
     * @param [in]     pending_tx         Array of frames, per interface, that are likely to be transmitted next.
     * @param [in]     blocking_deadline  Zero means non-blocking operation.
     * @return Positive number of ready interfaces or negative error code.
     */
    virtual int16_t select(CanSelectMasks& inout_masks,
                           const CanFrame* (& pending_tx)[MaxCanIfaces],
                           MonotonicTime blocking_deadline) = 0;
};

}

#endif // UAVCAN_DRIVER_CAN_HPP_INCLUDED
@@ -0,0 +1,60 @@
/*
* System clock driver interface.
* Copyright (C) 2014 Pavel Kirienko <pavel.kirienko@gmail.com>
*/
#ifndef UAVCAN_DRIVER_SYSTEM_CLOCK_HPP_INCLUDED
#define UAVCAN_DRIVER_SYSTEM_CLOCK_HPP_INCLUDED
#include <uavcan/std.hpp>
#include <uavcan/build_config.hpp>
#include <uavcan/time.hpp>
namespace uavcan
{
/**
 * System clock interface - monotonic and UTC.
 * Platform drivers implement this; the library uses it for all protocol timing.
 */
class UAVCAN_EXPORT ISystemClock
{
public:
    virtual ~ISystemClock() { }

    /**
     * Monotonic system clock.
     *
     * This clock shall never jump or change rate; the base time is irrelevant.
     * This clock is mandatory and must remain functional at all times.
     *
     * On POSIX systems use clock_gettime() with CLOCK_MONOTONIC.
     */
    virtual MonotonicTime getMonotonic() const = 0;

    /**
     * Global network clock.
     * It doesn't have to be UTC, the name is a bit misleading - actual time base doesn't matter.
     *
     * This clock can be synchronized with other nodes on the bus, hence it can jump and/or change
     * rate occasionally.
     * This clock is optional; if it is not supported, return zero. Also return zero if the UTC time
     * is not available yet (e.g. the device has just started up with no battery clock).
     *
     * For POSIX refer to clock_gettime(), gettimeofday().
     */
    virtual UtcTime getUtc() const = 0;

    /**
     * Adjust the network-synchronized clock.
     * Refer to @ref getUtc() for details.
     *
     * For POSIX refer to adjtime(), settimeofday().
     *
     * @param [in] adjustment Amount of time to add to the clock value.
     */
    virtual void adjustUtc(UtcDuration adjustment) = 0;
};
}
#endif // UAVCAN_DRIVER_SYSTEM_CLOCK_HPP_INCLUDED
@@ -0,0 +1,210 @@
/*
* Copyright (C) 2014 Pavel Kirienko <pavel.kirienko@gmail.com>
*/
#ifndef UAVCAN_DYNAMIC_MEMORY_HPP_INCLUDED
#define UAVCAN_DYNAMIC_MEMORY_HPP_INCLUDED
#include <cassert>
#include <cstdlib>
#include <cstring>
#include <uavcan/std.hpp>
#include <uavcan/util/templates.hpp>
#include <uavcan/util/placement_new.hpp>
#include <uavcan/build_config.hpp>
namespace uavcan
{
/**
 * This interface is used by other library components that need dynamic memory.
 * Implementations hand out fixed-size blocks; see @ref PoolAllocator and @ref LimitedPoolAllocator below.
 */
class UAVCAN_EXPORT IPoolAllocator
{
public:
    virtual ~IPoolAllocator() { }

    /// Returns a block of at least @a size bytes, or null if the request cannot be served.
    virtual void* allocate(std::size_t size) = 0;

    /// Returns a previously allocated block to the pool. Null pointers must be tolerated.
    virtual void deallocate(const void* ptr) = 0;

    /**
     * Returns the maximum number of blocks this allocator can allocate.
     */
    virtual uint16_t getBlockCapacity() const = 0;
};
/**
 * Classic implementation of a pool allocator (Meyers).
 *
 * The allocator can be made thread-safe (optional) by means of providing a RAII-lock type via the second template
 * argument. The allocator uses the lock only to access the shared state, therefore critical sections are only a few
 * cycles long, which implies that it should be acceptable to use hardware IRQ disabling instead of a mutex for
 * performance reasons. For example, an IRQ-based RAII-lock type can be implemented as follows:
 *     struct RaiiSynchronizer
 *     {
 *         RaiiSynchronizer() { __disable_irq(); }
 *         ~RaiiSynchronizer() { __enable_irq(); }
 *     };
 * The default RaiiSynchronizer is a plain char, i.e. no synchronization at all.
 */
template <std::size_t PoolSize,
          uint8_t BlockSize,
          typename RaiiSynchronizer = char>
class UAVCAN_EXPORT PoolAllocator : public IPoolAllocator,
                                    Noncopyable
{
    // Free-list node; shares its storage with the user data of an unallocated block.
    union Node
    {
        uint8_t data[BlockSize];
        Node* next;
    };

    Node* free_list_;   // Head of the singly-linked list of unallocated blocks

    // The union forces worst-case alignment of the pool storage.
    union
    {
        uint8_t bytes[PoolSize];
        long double _aligner1;
        long long _aligner2;
        Node _aligner3;
    } pool_;

    uint16_t used_;       // Number of blocks currently allocated
    uint16_t max_used_;   // Peak value of used_; reported by getPeakNumUsedBlocks()

public:
    static const uint16_t NumBlocks = PoolSize / BlockSize;

    PoolAllocator();

    virtual void* allocate(std::size_t size) override;
    virtual void deallocate(const void* ptr) override;

    virtual uint16_t getBlockCapacity() const override { return NumBlocks; }

    /**
     * Return the number of blocks that are currently allocated/unallocated.
     */
    uint16_t getNumUsedBlocks() const
    {
        RaiiSynchronizer lock;
        (void)lock;
        return used_;
    }

    uint16_t getNumFreeBlocks() const
    {
        RaiiSynchronizer lock;
        (void)lock;
        return static_cast<uint16_t>(NumBlocks - used_);
    }

    /**
     * Returns the maximum number of blocks that were ever allocated at the same time.
     */
    uint16_t getPeakNumUsedBlocks() const
    {
        RaiiSynchronizer lock;
        (void)lock;
        return max_used_;
    }
};
/**
 * Limits the maximum number of blocks that can be allocated in a given allocator.
 * Decorator over another @ref IPoolAllocator; it does not own any memory itself.
 */
class LimitedPoolAllocator : public IPoolAllocator
{
    IPoolAllocator& allocator_;    // Underlying allocator that actually serves the requests
    const uint16_t max_blocks_;    // Cap on concurrently allocated blocks, clamped to 0xFFFF
    uint16_t used_blocks_;         // Blocks currently handed out through this wrapper

public:
    LimitedPoolAllocator(IPoolAllocator& allocator, std::size_t max_blocks)
        : allocator_(allocator)
        , max_blocks_(static_cast<uint16_t>(min<std::size_t>(max_blocks, 0xFFFFU)))
        , used_blocks_(0)
    {
        UAVCAN_ASSERT(max_blocks_ > 0);
    }

    virtual void* allocate(std::size_t size) override;
    virtual void deallocate(const void* ptr) override;

    virtual uint16_t getBlockCapacity() const override;
};
// ----------------------------------------------------------------------------
/*
 * PoolAllocator<>
 */
// Out-of-class definition of the static member (required for ODR-use in C++03/C++11).
template <std::size_t PoolSize, uint8_t BlockSize, typename RaiiSynchronizer>
const uint16_t PoolAllocator<PoolSize, BlockSize, RaiiSynchronizer>::NumBlocks;

// pool_.bytes is alignment-safe thanks to the union aligners, so the cast warning is suppressed.
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wcast-align"
template <std::size_t PoolSize, uint8_t BlockSize, typename RaiiSynchronizer>
PoolAllocator<PoolSize, BlockSize, RaiiSynchronizer>::PoolAllocator() :
    free_list_(reinterpret_cast<Node*>(pool_.bytes)),
    used_(0),
    max_used_(0)
{
    // The limit is imposed by the width of the pool usage tracking variables.
    StaticAssert<((PoolSize / BlockSize) <= 0xFFFFU)>::check();
    (void)std::memset(pool_.bytes, 0, PoolSize);
    // Link every block into the free list. The odd bound is equivalent to (i < NumBlocks - 1)
    // but written this way to avoid a -Wtype-limits warning, per the inline note.
    for (unsigned i = 0; (i + 1) < (NumBlocks - 1 + 1); i++)     // -Werror=type-limits
    {
        // coverity[dead_error_line : FALSE]
        free_list_[i].next = free_list_ + i + 1;
    }
    // NOTE(review): assumes NumBlocks >= 1 (i.e. PoolSize >= BlockSize); not statically checked here.
    free_list_[NumBlocks - 1].next = UAVCAN_NULLPTR;
}
#pragma GCC diagnostic pop
/**
 * Pops one block off the free list.
 * @param size  Requested size; must not exceed BlockSize, otherwise the request is rejected.
 * @return Pointer to a free block, or UAVCAN_NULLPTR if the pool is exhausted or size is too large.
 */
template <std::size_t PoolSize, uint8_t BlockSize, typename RaiiSynchronizer>
void* PoolAllocator<PoolSize, BlockSize, RaiiSynchronizer>::allocate(std::size_t size)
{
    // Oversized requests can never be served; reject before entering the critical section.
    if (size > BlockSize)
    {
        return UAVCAN_NULLPTR;
    }

    RaiiSynchronizer lock;
    (void)lock;

    /*
     * FIX(review): the free-list head was previously tested outside the critical section.
     * With a real RaiiSynchronizer that was a check-then-use race: another context could
     * empty the list between the check and the pop, leading to a null dereference of
     * free_list_->next. The check is now performed under the lock; single-threaded
     * behavior is unchanged.
     */
    if (free_list_ == UAVCAN_NULLPTR)
    {
        return UAVCAN_NULLPTR;
    }

    // Pop the head of the free list.
    void* pmem = free_list_;
    free_list_ = free_list_->next;

    // Statistics
    UAVCAN_ASSERT(used_ < NumBlocks);
    used_++;
    if (used_ > max_used_)
    {
        max_used_ = used_;
    }

    return pmem;
}
/**
 * Returns a block to the pool by pushing it onto the free list.
 * Null pointers are ignored.
 */
template <std::size_t PoolSize, uint8_t BlockSize, typename RaiiSynchronizer>
void PoolAllocator<PoolSize, BlockSize, RaiiSynchronizer>::deallocate(const void* ptr)
{
    if (ptr != UAVCAN_NULLPTR)
    {
        RaiiSynchronizer lock;
        (void)lock;

        // The returned block becomes the new head of the free list.
        Node* const returned_node = static_cast<Node*>(const_cast<void*>(ptr));
        returned_node->next = free_list_;
        free_list_ = returned_node;

        // Statistics
        UAVCAN_ASSERT(used_ > 0);
        used_--;
    }
}
}
#endif // UAVCAN_DYNAMIC_MEMORY_HPP_INCLUDED
@@ -0,0 +1,60 @@
/*
* Copyright (C) 2014 Pavel Kirienko <pavel.kirienko@gmail.com>
*/
#ifndef UAVCAN_ERROR_HPP_INCLUDED
#define UAVCAN_ERROR_HPP_INCLUDED
#include <uavcan/build_config.hpp>
#include <uavcan/std.hpp>
namespace uavcan
{
// Anonymous namespace: the constants below get internal linkage in every translation unit.
namespace
{
/**
 * Common error codes.
 *
 * Functions that return signed integers may also return inverted error codes,
 * i.e. returned value should be inverted back to get the actual error code.
 *
 * Return code 0 (zero) means no error.
 *
 * @{
 */
const int16_t ErrFailure                 = 1;  ///< General failure
const int16_t ErrInvalidParam            = 2;
const int16_t ErrMemory                  = 3;
const int16_t ErrDriver                  = 4;  ///< Platform driver error
const int16_t ErrUnknownDataType         = 5;
const int16_t ErrInvalidMarshalData      = 6;
const int16_t ErrInvalidTransferListener = 7;
const int16_t ErrNotInited               = 8;
const int16_t ErrRecursiveCall           = 9;
const int16_t ErrLogic                   = 10;
const int16_t ErrPassiveMode             = 11; ///< Operation not permitted in passive mode
const int16_t ErrTransferTooLong         = 12; ///< Transfer of this length cannot be sent with given transfer type
const int16_t ErrInvalidConfiguration    = 13;
/**
 * @}
 */
}

/**
 * Fatal error handler.
 * Behavior:
 *  - If exceptions are enabled, throws std::runtime_error() with the supplied message text;
 *  - If assertions are enabled (see UAVCAN_ASSERT()), aborts execution using zero assertion.
 *  - Otherwise aborts execution via std::abort().
 */
#if __GNUC__
__attribute__ ((noreturn))
#endif
UAVCAN_EXPORT
// coverity[+kill]
void handleFatalError(const char* msg);

}
#endif // UAVCAN_ERROR_HPP_INCLUDED
@@ -0,0 +1,220 @@
/*
* Copyright (C) 2015 Pavel Kirienko <pavel.kirienko@gmail.com>
*/
#ifndef UAVCAN_HELPERS_HEAP_BASED_POOL_ALLOCATOR_HPP_INCLUDED
#define UAVCAN_HELPERS_HEAP_BASED_POOL_ALLOCATOR_HPP_INCLUDED
#include <cstdlib>
#include <uavcan/build_config.hpp>
#include <uavcan/debug.hpp>
#include <uavcan/dynamic_memory.hpp>
namespace uavcan
{
/**
* A special-purpose implementation of a pool allocator that keeps the pool in the heap using malloc()/free().
* The pool grows dynamically, ad-hoc, thus using as little memory as possible.
*
* Allocated blocks will not be freed back automatically, but there are two ways to force their deallocation:
* - Call @ref shrink() - this method frees all blocks that are unused at the moment.
 * - Destroy the object - the destructor calls @ref shrink().
*
* The pool can be limited in growth with hard and soft limits.
* The soft limit defines the value that will be reported via @ref IPoolAllocator::getBlockCapacity().
* The hard limit defines the maximum number of blocks that can be allocated from heap.
* Typically, the hard limit should be equal or greater than the soft limit.
*
* The allocator can be made thread-safe (optional) by means of providing a RAII-lock type via the second template
* argument. The allocator uses the lock only to access the shared state, therefore critical sections are only a few
* cycles long, which implies that it should be acceptable to use hardware IRQ disabling instead of a mutex for
* performance reasons. For example, an IRQ-based RAII-lock type can be implemented as follows:
* struct RaiiSynchronizer
* {
* RaiiSynchronizer() { __disable_irq(); }
* ~RaiiSynchronizer() { __enable_irq(); }
* };
*/
template <std::size_t BlockSize,
          typename RaiiSynchronizer = char>
class UAVCAN_EXPORT HeapBasedPoolAllocator : public IPoolAllocator,
                                             Noncopyable
{
    // Free-list node. The data/aligner members size the node to BlockSize with worst-case alignment.
    union Node
    {
        Node* next;
    private:
        uint8_t data[BlockSize];
        long double _aligner1;
        long long _aligner2;
    };

    const uint16_t capacity_soft_limit_;   // Reported via getBlockCapacity()
    const uint16_t capacity_hard_limit_;   // Absolute cap on blocks obtained from heap

    uint16_t num_reserved_blocks_;         // Blocks obtained from heap (in reserve + in use)
    uint16_t num_allocated_blocks_;        // Blocks currently held by the application

    Node* reserve_;                        // Singly-linked list of idle, already-malloc()ed blocks

public:
    /**
     * The allocator initializes with empty reserve, so first allocations will be served from heap.
     *
     * @param block_capacity_soft_limit     Block capacity that will be reported via @ref getBlockCapacity().
     *
     * @param block_capacity_hard_limit     Real block capacity limit; the number of allocated blocks will never
     *                                      exceed this value. Hard limit should be higher than soft limit.
     *                                      Default value is two times the soft limit.
     */
    HeapBasedPoolAllocator(uint16_t block_capacity_soft_limit,
                           uint16_t block_capacity_hard_limit = 0) :
        capacity_soft_limit_(block_capacity_soft_limit),
        capacity_hard_limit_((block_capacity_hard_limit > 0) ? block_capacity_hard_limit :
                             static_cast<uint16_t>(min(static_cast<uint32_t>(block_capacity_soft_limit) * 2U,
                                                       static_cast<uint32_t>(NumericTraits<uint16_t>::max())))),
        num_reserved_blocks_(0),
        num_allocated_blocks_(0),
        reserve_(UAVCAN_NULLPTR)
    { }

    /**
     * The destructor de-allocates all blocks that are currently in the reserve.
     * BLOCKS THAT ARE CURRENTLY HELD BY THE APPLICATION WILL LEAK.
     */
    ~HeapBasedPoolAllocator()
    {
        shrink();
#if UAVCAN_DEBUG
        if (num_allocated_blocks_ > 0)
        {
            UAVCAN_TRACE("HeapBasedPoolAllocator", "%u BLOCKS LEAKED", num_allocated_blocks_);
        }
#endif
    }

    /**
     * Takes a block from the reserve, unless it's empty.
     * In the latter case, allocates a new block in the heap.
     */
    virtual void* allocate(std::size_t size) override
    {
        if (size > BlockSize)
        {
            return UAVCAN_NULLPTR;
        }
        {
            RaiiSynchronizer lock;
            (void)lock;

            // Fast path: pop an idle block from the reserve.
            Node* const p = reserve_;
            if (UAVCAN_LIKELY(p != UAVCAN_NULLPTR))
            {
                reserve_ = reserve_->next;
                num_allocated_blocks_++;
                return p;
            }

            if (num_reserved_blocks_ >= capacity_hard_limit_)   // Hard limit reached, no further allocations
            {
                return UAVCAN_NULLPTR;
            }
        }
        // Unlikely branch: grow the pool from heap, outside the critical section.
        // NOTE(review): the hard-limit check above is released before malloc(); two concurrent
        // callers could transiently exceed the hard limit by a small margin — confirm acceptable.
        void* const m = std::malloc(sizeof(Node));
        if (m != UAVCAN_NULLPTR)
        {
            RaiiSynchronizer lock;
            (void)lock;
            num_reserved_blocks_++;
            num_allocated_blocks_++;
        }
        return m;
    }

    /**
     * Puts the block back to reserve.
     * The block will not be free()d automatically; see @ref shrink().
     */
    virtual void deallocate(const void* ptr) override
    {
        if (ptr != UAVCAN_NULLPTR)
        {
            RaiiSynchronizer lock;
            (void)lock;

            Node* const node = static_cast<Node*>(const_cast<void*>(ptr));
            node->next = reserve_;
            reserve_ = node;

            num_allocated_blocks_--;
        }
    }

    /**
     * The soft limit.
     */
    virtual uint16_t getBlockCapacity() const override { return capacity_soft_limit_; }

    /**
     * The hard limit.
     */
    uint16_t getBlockCapacityHardLimit() const { return capacity_hard_limit_; }

    /**
     * Frees all blocks that are not in use at the moment.
     * Post-condition is getNumAllocatedBlocks() == getNumReservedBlocks().
     */
    void shrink()
    {
        Node* p = UAVCAN_NULLPTR;
        for (;;)
        {
            {
                RaiiSynchronizer lock;
                (void)lock;
                // Removing from reserve and updating the counter.
                p = reserve_;
                if (p != UAVCAN_NULLPTR)
                {
                    reserve_ = reserve_->next;
                    num_reserved_blocks_--;
                }
                else
                {
                    break;
                }
            }
            // Then freeing, having left the critical section.
            std::free(p);
        }
    }

    /**
     * Number of blocks that are currently in use by the application.
     */
    uint16_t getNumAllocatedBlocks() const
    {
        RaiiSynchronizer lock;
        (void)lock;
        return num_allocated_blocks_;
    }

    /**
     * Number of blocks that are acquired from the heap.
     */
    uint16_t getNumReservedBlocks() const
    {
        RaiiSynchronizer lock;
        (void)lock;
        return num_reserved_blocks_;
    }
};
}
#endif
@@ -0,0 +1,63 @@
/*
* Copyright (C) 2014 Pavel Kirienko <pavel.kirienko@gmail.com>
*/
#ifndef UAVCAN_HELPERS_OSTREAM_HPP_INCLUDED
#define UAVCAN_HELPERS_OSTREAM_HPP_INCLUDED
#include <uavcan/util/templates.hpp>
#include <cstdio>
namespace uavcan
{
/**
 * Compact replacement for std::ostream for use on embedded systems.
 * Can be used for printing UAVCAN messages to stdout.
 *
 * Relevant discussion: https://groups.google.com/forum/#!topic/px4users/6c1CLNutN90
 *
 * Usage:
 *      OStream::instance() << "Hello world!" << OStream::endl;
 */
class UAVCAN_EXPORT OStream : uavcan::Noncopyable
{
    OStream() { }   // Singleton: construction only via instance()

public:
    /// Meyers-style singleton accessor.
    static OStream& instance()
    {
        static OStream s;
        return s;
    }

    /// Stream manipulator that emits a newline; usable as `stream << OStream::endl`.
    static OStream& endl(OStream& stream)
    {
        std::printf("\n");
        return stream;
    }
};
/*
 * Formatted output operators. Each one forwards to std::printf() with the format
 * specifier matching its argument type; narrower types are widened and delegated.
 */
inline OStream& operator<<(OStream& s, long long x)          { std::printf("%lld", x); return s; }
inline OStream& operator<<(OStream& s, unsigned long long x) { std::printf("%llu", x); return s; }
inline OStream& operator<<(OStream& s, long x)               { std::printf("%ld", x);  return s; }
inline OStream& operator<<(OStream& s, unsigned long x)      { std::printf("%lu", x);  return s; }
inline OStream& operator<<(OStream& s, int x)                { std::printf("%d", x);   return s; }
inline OStream& operator<<(OStream& s, unsigned int x)       { std::printf("%u", x);   return s; }
inline OStream& operator<<(OStream& s, short x)              { return operator<<(s, static_cast<int>(x)); }
inline OStream& operator<<(OStream& s, unsigned short x)     { return operator<<(s, static_cast<unsigned>(x)); }
inline OStream& operator<<(OStream& s, long double x)        { std::printf("%Lg", x); return s; }
inline OStream& operator<<(OStream& s, double x)             { std::printf("%g", x);  return s; }
inline OStream& operator<<(OStream& s, float x)              { return operator<<(s, static_cast<double>(x)); }
inline OStream& operator<<(OStream& s, char x)               { std::printf("%c", x);  return s; }
inline OStream& operator<<(OStream& s, const char* x)        { std::printf("%s", x);  return s; }
// Manipulator support (e.g. OStream::endl).
inline OStream& operator<<(OStream& s, OStream&(*manip)(OStream&)) { return manip(s); }
}
#endif // UAVCAN_HELPERS_OSTREAM_HPP_INCLUDED
File diff suppressed because it is too large Load Diff
@@ -0,0 +1,93 @@
/*
* Copyright (C) 2014 Pavel Kirienko <pavel.kirienko@gmail.com>
*/
#ifndef UAVCAN_MARSHAL_BIT_STREAM_HPP_INCLUDED
#define UAVCAN_MARSHAL_BIT_STREAM_HPP_INCLUDED
#include <cassert>
#include <uavcan/std.hpp>
#include <uavcan/util/templates.hpp>
#include <uavcan/transport/abstract_transfer_buffer.hpp>
#include <uavcan/build_config.hpp>
namespace uavcan
{
/**
 * This function implements fast copy of unaligned bit arrays. It isn't part of the library API, so it is not exported.
 * @param src_org       Source array
 * @param src_offset    Bit offset of the first source byte
 * @param src_len       Number of bits to copy
 * @param dst_org       Destination array
 * @param dst_offset    Bit offset of the first destination byte
 */
void bitarrayCopy(const unsigned char* src_org, std::size_t src_offset, std::size_t src_len,
                  unsigned char* dst_org, std::size_t dst_offset);
/**
 * This class treats a chunk of memory as an array of bits.
 * It is used by the bit codec for serialization/deserialization.
 */
class UAVCAN_EXPORT BitStream
{
    static const unsigned MaxBytesPerRW = 16;   // Upper bound on bytes handled per write()/read() call

    ITransferBuffer& buf_;      // Underlying byte buffer being serialized into / deserialized from
    unsigned bit_offset_;       // Current stream position, in bits from the start of the buffer
    uint8_t byte_cache_;

    static inline unsigned bitlenToBytelen(unsigned bits) { return (bits + 7) / 8; }

    // Thin wrappers over bitarrayCopy() that fix one side's bit offset at zero.
    static inline void copyBitArrayAlignedToUnaligned(const uint8_t* src_org, unsigned src_len,
                                                      uint8_t* dst_org, unsigned dst_offset)
    {
        bitarrayCopy(reinterpret_cast<const unsigned char*>(src_org), 0, src_len,
                     reinterpret_cast<unsigned char*>(dst_org), dst_offset);
    }

    static inline void copyBitArrayUnalignedToAligned(const uint8_t* src_org, unsigned src_offset, unsigned src_len,
                                                      uint8_t* dst_org)
    {
        bitarrayCopy(reinterpret_cast<const unsigned char*>(src_org), src_offset, src_len,
                     reinterpret_cast<unsigned char*>(dst_org), 0);
    }

public:
    static const unsigned MaxBitsPerRW = MaxBytesPerRW * 8;

    enum
    {
        ResultOutOfBuffer = 0,
        ResultOk = 1
    };

    explicit BitStream(ITransferBuffer& buf)
        : buf_(buf)
        , bit_offset_(0)
        , byte_cache_(0)
    {
        StaticAssert<sizeof(uint8_t) == 1>::check();
    }

    /**
     * Write/read calls interpret bytes as bit arrays, 8 bits per byte, where the most
     * significant bits have lower index, i.e.:
     *   Hex:     55 2d
     *   Bits:    01010101 00101101
     *   Indices: 0  ..  7 8  ..  15
     * Return values:
     *   Negative - Error
     *   Zero     - Out of buffer space
     *   Positive - OK
     */
    int write(const uint8_t* bytes, const unsigned bitlen);
    int read(uint8_t* bytes, const unsigned bitlen);

#if UAVCAN_TOSTRING
    std::string toString() const;
#endif
};
}
#endif // UAVCAN_MARSHAL_BIT_STREAM_HPP_INCLUDED
@@ -0,0 +1,142 @@
/*
* Copyright (C) 2014 Pavel Kirienko <pavel.kirienko@gmail.com>
*/
#ifndef UAVCAN_MARSHAL_CHAR_ARRAY_FORMATTER_HPP_INCLUDED
#define UAVCAN_MARSHAL_CHAR_ARRAY_FORMATTER_HPP_INCLUDED
#include <uavcan/build_config.hpp>
#include <uavcan/marshal/array.hpp>
#if !defined(UAVCAN_CPP_VERSION) || !defined(UAVCAN_CPP11)
# error UAVCAN_CPP_VERSION
#endif
#if UAVCAN_CPP_VERSION >= UAVCAN_CPP11
# include <type_traits>
#endif
namespace uavcan
{
#if UAVCAN_CPP_VERSION >= UAVCAN_CPP11
/**
 * Type-safe printf-like formatter that appends into a uavcan array (C++11 version).
 * The '%' character introduces a value substitution; "%%" emits a literal '%'.
 */
template <typename ArrayType_>
class UAVCAN_EXPORT CharArrayFormatter
{
    ArrayType_& array_;

    // Floating point values are rendered via "%g" as double.
    template <typename T>
    typename std::enable_if<std::is_floating_point<T>::value>::type
    writeValue(T value)
    {
        array_.template appendFormatted<double>("%g", double(value));
    }

    // Integral values: plain char is pushed verbatim; other integrals go through %lli/%llu.
    template <typename T>
    typename std::enable_if<std::is_integral<T>::value>::type
    writeValue(T value)
    {
        if (std::is_same<T, char>())
        {
            if (array_.size() != array_.capacity())
            {
                array_.push_back(typename ArrayType_::ValueType(value));
            }
        }
        else if (std::is_signed<T>())
        {
            array_.template appendFormatted<long long>("%lli", static_cast<long long>(value));
        }
        else
        {
            array_.template appendFormatted<unsigned long long>("%llu", static_cast<unsigned long long>(value));
        }
    }

    // Non-string pointers are rendered as %p.
    template <typename T>
    typename std::enable_if<std::is_pointer<T>::value && !std::is_same<T, const char*>::value>::type
    writeValue(T value)
    {
        array_.template appendFormatted<const void*>("%p", static_cast<const void*>(value));
    }

    void writeValue(const char* value)
    {
        array_.template appendFormatted<const char*>("%s", value);
    }

public:
    typedef ArrayType_ ArrayType;

    explicit CharArrayFormatter(ArrayType& array)
        : array_(array)
    { }

    ArrayType& getArray() { return array_; }
    const ArrayType& getArray() const { return array_; }

    void write(const char* text)
    {
        writeValue(text);
    }

    /**
     * Recursive variadic formatter: each '%' (except "%%") consumes one argument.
     * NOTE(review): a format string ending in a lone '%' advances the cursor past the
     * terminator before the loop re-checks it — confirm callers never pass a trailing '%'.
     */
    template <typename T, typename... Args>
    void write(const char* s, T value, Args... args)
    {
        while (s && *s)
        {
            if (*s == '%')
            {
                s += 1;
                if (*s != '%')
                {
                    writeValue(value);
                    write(++s, args...);
                    break;
                }
            }
            writeValue(*s++);
        }
    }
};
#else
/**
 * C++03 fallback formatter: no variadic templates, so only one formatted argument
 * per call is supported and the format string is passed through unchecked.
 */
template <typename ArrayType_>
class UAVCAN_EXPORT CharArrayFormatter
{
    ArrayType_& array_;

public:
    typedef ArrayType_ ArrayType;

    CharArrayFormatter(ArrayType& array)
        : array_(array)
    { }

    ArrayType& getArray() { return array_; }
    const ArrayType& getArray() const { return array_; }

    void write(const char* const text)
    {
        array_.template appendFormatted<const char*>("%s", text);
    }

    /**
     * This version does not support more than one formatted argument, though it can be improved.
     * And it is unsafe.
     * There is typesafe version for C++11 above.
     */
    template <typename A>
    void write(const char* const format, const A value)
    {
        array_.template appendFormatted<A>(format, value);
    }
};
#endif
}
#endif // UAVCAN_MARSHAL_CHAR_ARRAY_FORMATTER_HPP_INCLUDED
@@ -0,0 +1,238 @@
/*
* Copyright (C) 2014 Pavel Kirienko <pavel.kirienko@gmail.com>
*/
#ifndef UAVCAN_MARSHAL_FLOAT_SPEC_HPP_INCLUDED
#define UAVCAN_MARSHAL_FLOAT_SPEC_HPP_INCLUDED
#include <cmath>
#include <uavcan/std.hpp>
#include <uavcan/data_type.hpp>
#include <uavcan/util/templates.hpp>
#include <uavcan/build_config.hpp>
#include <uavcan/marshal/type_util.hpp>
#include <uavcan/marshal/integer_spec.hpp>
#ifndef UAVCAN_CPP_VERSION
# error UAVCAN_CPP_VERSION
#endif
#if UAVCAN_CPP_VERSION >= UAVCAN_CPP11
# include <limits> // Assuming that in C++11 mode all standard headers are available
#endif
namespace uavcan
{
/**
 * Selects the smallest native floating point type that is at least BitLen bits wide.
 * If no native type is wide enough, the result is the incomplete ErrorNoSuchFloat type,
 * which fails compilation at the point of use.
 */
template <unsigned BitLen>
struct NativeFloatSelector
{
    struct ErrorNoSuchFloat;
    typedef typename Select<(sizeof(float) * 8 >= BitLen), float,
            typename Select<(sizeof(double) * 8 >= BitLen), double,
            typename Select<(sizeof(long double) * 8 >= BitLen), long double,
            ErrorNoSuchFloat>::Result>::Result>::Result Type;
};
/**
 * Converts between native floating point types and their IEEE 754 bit representations.
 * Static-only utility class (private constructor).
 */
class UAVCAN_EXPORT IEEE754Converter
{
    // TODO: Non-IEEE float support

    // Half-precision (binary16) conversions are implemented out-of-line.
    static uint16_t nativeIeeeToHalf(float value);
    static float halfToNativeIeee(uint16_t value);

    IEEE754Converter();

    template <unsigned BitLen>
    static void enforceIeee()
    {
        /*
         * Some compilers may have is_iec559 to be defined false despite the fact that IEEE754 is supported.
         * An acceptable workaround would be to put an #ifdef here.
         */
#if UAVCAN_CPP_VERSION >= UAVCAN_CPP11
        StaticAssert<std::numeric_limits<typename NativeFloatSelector<BitLen>::Type>::is_iec559>::check();
#endif
    }

public:
#if UAVCAN_CPP_VERSION >= UAVCAN_CPP11
    /// UAVCAN requires rounding to nearest for all float conversions
    static std::float_round_style roundstyle() { return std::round_to_nearest; }
#endif

    /**
     * Reinterprets a native float as the unsigned integer carrying its IEEE 754 bit pattern.
     * The union-based pun requires the native type to be exactly BitLen bits (StaticAssert below).
     */
    template <unsigned BitLen>
    static typename IntegerSpec<BitLen, SignednessUnsigned, CastModeTruncate>::StorageType
    toIeee(typename NativeFloatSelector<BitLen>::Type value)
    {
        enforceIeee<BitLen>();
        union
        {
            typename IntegerSpec<BitLen, SignednessUnsigned, CastModeTruncate>::StorageType i;
            typename NativeFloatSelector<BitLen>::Type f;
        } u;
        StaticAssert<sizeof(u.f) * 8 == BitLen>::check();
        u.f = value;
        return u.i;
    }

    /**
     * Inverse of @ref toIeee(): reinterprets an IEEE 754 bit pattern as the native float type.
     */
    template <unsigned BitLen>
    static typename NativeFloatSelector<BitLen>::Type
    toNative(typename IntegerSpec<BitLen, SignednessUnsigned, CastModeTruncate>::StorageType value)
    {
        enforceIeee<BitLen>();
        union
        {
            typename IntegerSpec<BitLen, SignednessUnsigned, CastModeTruncate>::StorageType i;
            typename NativeFloatSelector<BitLen>::Type f;
        } u;
        StaticAssert<sizeof(u.f) * 8 == BitLen>::check();
        u.i = value;
        return u.f;
    }
};
/*
 * 16-bit (half precision) specializations: there is no native binary16 type,
 * so conversion goes through the dedicated software routines instead of the union pun.
 */
template <>
inline typename IntegerSpec<16, SignednessUnsigned, CastModeTruncate>::StorageType
IEEE754Converter::toIeee<16>(typename NativeFloatSelector<16>::Type value)
{
    return nativeIeeeToHalf(value);
}
template <>
inline typename NativeFloatSelector<16>::Type
IEEE754Converter::toNative<16>(typename IntegerSpec<16, SignednessUnsigned, CastModeTruncate>::StorageType value)
{
    return halfToNativeIeee(value);
}
/**
 * Per-width numeric limits for the supported IEEE 754 formats.
 * Only 16/32/64-bit specializations exist; other widths fail to compile.
 */
template <unsigned BitLen> struct IEEE754Limits;

template <> struct IEEE754Limits<16>
{
    typedef typename NativeFloatSelector<16>::Type NativeType;
    static NativeType max()     { return static_cast<NativeType>(65504.0); }
    static NativeType epsilon() { return static_cast<NativeType>(9.77e-04); }
};

template <> struct IEEE754Limits<32>
{
    typedef typename NativeFloatSelector<32>::Type NativeType;
    static NativeType max()     { return static_cast<NativeType>(3.40282346638528859812e+38); }
    static NativeType epsilon() { return static_cast<NativeType>(1.19209289550781250000e-7); }
};

template <> struct IEEE754Limits<64>
{
    typedef typename NativeFloatSelector<64>::Type NativeType;
    static NativeType max()     { return static_cast<NativeType>(1.79769313486231570815e+308L); }
    static NativeType epsilon() { return static_cast<NativeType>(2.22044604925031308085e-16L); }
};
/**
 * Marshalling spec for a DSDL floating point field of the given bit length.
 * CastModeSaturate clamps out-of-range values to +/-max(); CastModeTruncate maps them to +/-infinity.
 */
template <unsigned BitLen_, CastMode CastMode>
class UAVCAN_EXPORT FloatSpec : public IEEE754Limits<BitLen_>
{
    FloatSpec();    // Static-only type; never instantiated

public:
    enum { BitLen = BitLen_ };
    enum { MinBitLen = BitLen };
    enum { MaxBitLen = BitLen };
    enum { IsPrimitive = 1 };

    typedef typename NativeFloatSelector<BitLen>::Type StorageType;

    // True when the native storage type has exactly BitLen bits (and, in C++11, is IEEE 754),
    // in which case no saturation/truncation is ever needed.
#if UAVCAN_CPP_VERSION < UAVCAN_CPP11
    enum { IsExactRepresentation = (sizeof(StorageType) * 8 == BitLen) };
#else
    enum { IsExactRepresentation = (sizeof(StorageType) * 8 == BitLen) && std::numeric_limits<StorageType>::is_iec559 };
#endif

    using IEEE754Limits<BitLen>::max;
    using IEEE754Limits<BitLen>::epsilon;

#if UAVCAN_CPP_VERSION >= UAVCAN_CPP11
    static std::float_round_style roundstyle() { return IEEE754Converter::roundstyle(); }
#endif

    /// Clamps/truncates per CastMode, then encodes the IEEE 754 bit pattern into the codec.
    static int encode(StorageType value, ScalarCodec& codec, TailArrayOptimizationMode)
    {
        // cppcheck-suppress duplicateExpression
        if (CastMode == CastModeSaturate)
        {
            saturate(value);
        }
        else
        {
            truncate(value);
        }
        return codec.encode<BitLen>(IEEE754Converter::toIeee<BitLen>(value));
    }

    /// Decodes an IEEE 754 bit pattern from the codec and converts it to the native type.
    static int decode(StorageType& out_value, ScalarCodec& codec, TailArrayOptimizationMode)
    {
        typename IntegerSpec<BitLen, SignednessUnsigned, CastModeTruncate>::StorageType ieee = 0;
        const int res = codec.decode<BitLen>(ieee);
        if (res <= 0)
        {
            return res;
        }
        out_value = IEEE754Converter::toNative<BitLen>(ieee);
        return res;
    }

    static void extendDataTypeSignature(DataTypeSignature&) { }

private:
    // Clamp finite out-of-range values to the representable extremes (no-op for exact representations).
    static inline void saturate(StorageType& value)
    {
        if ((IsExactRepresentation == 0) && isFinite(value))
        {
            if (value > max())
            {
                value = max();
            }
            else if (value < -max())
            {
                value = -max();
            }
            else
            {
                ;   // Valid range
            }
        }
    }

    // Map finite out-of-range values to signed infinity (no-op for exact representations).
    static inline void truncate(StorageType& value)
    {
        if ((IsExactRepresentation == 0) && isFinite(value))
        {
            if (value > max())
            {
                value = NumericTraits<StorageType>::infinity();
            }
            else if (value < -max())
            {
                value = -NumericTraits<StorageType>::infinity();
            }
            else
            {
                ;   // Valid range
            }
        }
    }
};
/**
 * YAML text output for float fields: the value is simply forwarded to the stream.
 */
template <unsigned BitLen, CastMode CastMode>
class UAVCAN_EXPORT YamlStreamer<FloatSpec<BitLen, CastMode> >
{
    typedef typename FloatSpec<BitLen, CastMode>::StorageType StorageType;

public:
    template <typename Stream>  // cppcheck-suppress passedByValue
    static void stream(Stream& s, const StorageType value, int)
    {
        s << value;
    }
};
}
#endif // UAVCAN_MARSHAL_FLOAT_SPEC_HPP_INCLUDED
@@ -0,0 +1,210 @@
/*
* Copyright (C) 2014 Pavel Kirienko <pavel.kirienko@gmail.com>
*/
#ifndef UAVCAN_MARSHAL_INTEGER_SPEC_HPP_INCLUDED
#define UAVCAN_MARSHAL_INTEGER_SPEC_HPP_INCLUDED
#include <uavcan/std.hpp>
#include <uavcan/data_type.hpp>
#include <uavcan/util/templates.hpp>
#include <uavcan/marshal/scalar_codec.hpp>
#include <uavcan/marshal/type_util.hpp>
namespace uavcan
{
enum Signedness { SignednessUnsigned, SignednessSigned };
/**
 * This template will be used for signed and unsigned integers more than 1 bit long.
 * There are explicit specializations for booleans below.
 */
template <unsigned BitLen_, Signedness Signedness, CastMode CastMode>
class UAVCAN_EXPORT IntegerSpec
{
    struct ErrorNoSuchInteger;      // Incomplete type: selected when BitLen > 64, failing compilation at use

public:
    enum { IsSigned = Signedness == SignednessSigned };
    enum { BitLen = BitLen_ };
    enum { MinBitLen = BitLen };
    enum { MaxBitLen = BitLen };
    enum { IsPrimitive = 1 };

    // Smallest fixed-width native integer that can hold BitLen bits of the requested signedness.
    typedef typename Select<(BitLen <= 8),  typename Select<IsSigned, int8_t,  uint8_t>::Result,
            typename Select<(BitLen <= 16), typename Select<IsSigned, int16_t, uint16_t>::Result,
            typename Select<(BitLen <= 32), typename Select<IsSigned, int32_t, uint32_t>::Result,
            typename Select<(BitLen <= 64), typename Select<IsSigned, int64_t, uint64_t>::Result,
            ErrorNoSuchInteger>::Result>::Result>::Result>::Result StorageType;

    typedef typename IntegerSpec<BitLen, SignednessUnsigned, CastMode>::StorageType UnsignedStorageType;

private:
    IntegerSpec();      // Static-only type; never instantiated

    // Limits computed by shifting; valid for BitLen < 64 (a 64-bit shift would be UB, see LimitsImpl64).
    struct LimitsImplGeneric
    {
        static StorageType max()
        {
            StaticAssert<(sizeof(uintmax_t) >= 8)>::check();
            if (IsSigned == 0)
            {
                return StorageType((uintmax_t(1) << static_cast<unsigned>(BitLen)) - 1U);
            }
            else
            {
                return StorageType((uintmax_t(1) << (static_cast<unsigned>(BitLen) - 1U)) - 1);
            }
        }
        static UnsignedStorageType mask()
        {
            StaticAssert<(sizeof(uintmax_t) >= 8U)>::check();
            return UnsignedStorageType((uintmax_t(1) << static_cast<unsigned>(BitLen)) - 1U);
        }
    };

    // Hardcoded limits for the full 64-bit case.
    struct LimitsImpl64
    {
        static StorageType max()
        {
            return StorageType((IsSigned == 0) ? 0xFFFFFFFFFFFFFFFFULL : 0x7FFFFFFFFFFFFFFFLL);
        }
        static UnsignedStorageType mask() { return 0xFFFFFFFFFFFFFFFFULL; }
    };

    typedef typename Select<(BitLen == 64), LimitsImpl64, LimitsImplGeneric>::Result Limits;

    // Clamp the value into [min(), max()].
    static void saturate(StorageType& value)
    {
        if (value > max())
        {
            value = max();
        }
        else if (value <= min())    // 'Less or Equal' allows to suppress compiler warning on unsigned types
        {
            value = min();
        }
        else
        {
            ;   // Valid range
        }
    }

    // Keep only the low BitLen bits.
    static void truncate(StorageType& value) { value = value & StorageType(mask()); }

    // Sanity checks on the computed storage type and limits.
    static void validate()
    {
        StaticAssert<(BitLen <= (sizeof(StorageType) * 8))>::check();
        // coverity[result_independent_of_operands : FALSE]
        UAVCAN_ASSERT(max() <= NumericTraits<StorageType>::max());
        // coverity[result_independent_of_operands : FALSE]
        UAVCAN_ASSERT(min() >= NumericTraits<StorageType>::min());
    }

public:
    static StorageType max() { return Limits::max(); }
    static StorageType min() { return IsSigned ? StorageType(-max() - 1) : 0; }
    static UnsignedStorageType mask() { return Limits::mask(); }

    /// Saturates/truncates per CastMode, then encodes the low BitLen bits into the codec.
    static int encode(StorageType value, ScalarCodec& codec, TailArrayOptimizationMode)
    {
        validate();
        // cppcheck-suppress duplicateExpression
        if (CastMode == CastModeSaturate)
        {
            saturate(value);
        }
        else
        {
            truncate(value);
        }
        return codec.encode<BitLen>(value);
    }

    static int decode(StorageType& out_value, ScalarCodec& codec, TailArrayOptimizationMode)
    {
        validate();
        return codec.decode<BitLen>(out_value);
    }

    static void extendDataTypeSignature(DataTypeSignature&) { }
};
/**
* Boolean specialization
*/
template <CastMode CastMode>
class UAVCAN_EXPORT IntegerSpec<1, SignednessUnsigned, CastMode>
{
public:
    enum { IsSigned = 0 };      // One-bit values are always unsigned
    enum { BitLen = 1 };
    enum { MinBitLen = 1 };
    enum { MaxBitLen = 1 };
    enum { IsPrimitive = 1 };

    typedef bool StorageType;           // A single bit maps naturally onto bool
    typedef bool UnsignedStorageType;

private:
    IntegerSpec();                      // Static-only class; instantiation is forbidden

public:
    static StorageType max() { return true; }
    static StorageType min() { return false; }
    static UnsignedStorageType mask() { return true; }

    // A one-bit value can never be out of range, so the cast mode is irrelevant here.
    static int encode(StorageType value, ScalarCodec& codec, TailArrayOptimizationMode)
    {
        return codec.encode<BitLen>(value);
    }

    static int decode(StorageType& out_value, ScalarCodec& codec, TailArrayOptimizationMode)
    {
        return codec.decode<BitLen>(out_value);
    }

    // No-op: a primitive bool adds nothing beyond what the DSDL compiler already folded in.
    static void extendDataTypeSignature(DataTypeSignature&) { }
};
// A one-bit signed integer cannot exist; the specialization is declared but never defined,
// so any attempt to instantiate it fails at compile time.
template <CastMode CastMode>
class IntegerSpec<1, SignednessSigned, CastMode>; // Invalid instantiation

// Likewise, a zero-bit integer is invalid regardless of signedness.
template <Signedness Signedness, CastMode CastMode>
class IntegerSpec<0, Signedness, CastMode>; // Invalid instantiation

/**
 * Compile-time trait: Result is 1 if T is any IntegerSpec<> specialization, 0 otherwise.
 */
template <typename T>
struct IsIntegerSpec
{
    enum { Result = 0 };
};

template <unsigned BitLen, Signedness Signedness, CastMode CastMode>
struct IsIntegerSpec<IntegerSpec<BitLen, Signedness, CastMode> >
{
    enum { Result = 1 };
};
/**
 * YAML streaming specialization for IntegerSpec-backed values.
 * Prints the numeric representation of the value into the given stream.
 */
template <unsigned BitLen, Signedness Signedness, CastMode CastMode>
class UAVCAN_EXPORT YamlStreamer<IntegerSpec<BitLen, Signedness, CastMode> >
{
    typedef IntegerSpec<BitLen, Signedness, CastMode> RawType;
    typedef typename RawType::StorageType StorageType;

public:
    template <typename Stream> // cppcheck-suppress passedByValue
    static void stream(Stream& s, const StorageType value, int)
    {
        // Get rid of character types - we want its integer representation, not ASCII code.
        // If StorageType is narrower than int (e.g. int8_t/uint8_t), widen it to (unsigned) int
        // with matching signedness before streaming.
        typedef typename Select<(sizeof(StorageType) >= sizeof(int)), StorageType,
                                typename Select<RawType::IsSigned, int, unsigned>::Result >::Result TempType;
        s << TempType(value);
    }
};
}
#endif // UAVCAN_MARSHAL_INTEGER_SPEC_HPP_INCLUDED
@@ -0,0 +1,140 @@
/*
* Copyright (C) 2014 Pavel Kirienko <pavel.kirienko@gmail.com>
*/
#ifndef UAVCAN_MARSHAL_SCALAR_CODEC_HPP_INCLUDED
#define UAVCAN_MARSHAL_SCALAR_CODEC_HPP_INCLUDED
#include <cassert>
#include <uavcan/std.hpp>
#include <uavcan/build_config.hpp>
#include <uavcan/util/templates.hpp>
#include <uavcan/marshal/bit_stream.hpp>
namespace uavcan
{
/**
* This class implements fast encoding/decoding of primitive type scalars into/from bit arrays.
* It uses the compile-time type information to eliminate run-time operations where possible.
*/
class UAVCAN_EXPORT ScalarCodec
{
    BitStream& stream_;     // Underlying bit stream; not owned

    // Reverses the order of 'len' bytes in place.
    static void swapByteOrder(uint8_t* bytes, unsigned len);

    // Multi-byte values: convert between host byte order and the wire's little-endian order.
    // Enabled only when the value occupies more than one byte on the wire.
    template <unsigned BitLen, unsigned Size>
    static typename EnableIf<(BitLen > 8)>::Type
    convertByteOrder(uint8_t (&bytes)[Size])
    {
#if defined(BYTE_ORDER) && defined(BIG_ENDIAN)
        static const bool big_endian = BYTE_ORDER == BIG_ENDIAN;
#else
        // Run-time endianness probe: on a big endian machine the most significant
        // byte of the long sits at the highest address.
        union { long int l; char c[sizeof(long int)]; } u;
        u.l = 1;
        const bool big_endian = u.c[sizeof(long int) - 1] == 1;
#endif
        /*
         * I didn't have any big endian machine nearby, so big endian support wasn't tested yet.
         * It is likely to be OK anyway, so feel free to remove this UAVCAN_ASSERT() as needed.
         */
        UAVCAN_ASSERT(big_endian == false);
        if (big_endian)
        {
            swapByteOrder(bytes, Size);
        }
    }

    // Single-byte values need no byte order conversion.
    template <unsigned BitLen, unsigned Size>
    static typename EnableIf<(BitLen <= 8)>::Type
    convertByteOrder(uint8_t (&)[Size]) { }

    // Sign-extends a decoded signed value whose wire width BitLen is narrower than T:
    // if the wire sign bit is set, fill the upper host bits with ones.
    template <unsigned BitLen, typename T>
    static typename EnableIf<static_cast<bool>(NumericTraits<T>::IsSigned) && ((sizeof(T) * 8) > BitLen)>::Type
    fixTwosComplement(T& value)
    {
        StaticAssert<NumericTraits<T>::IsInteger>::check(); // Not applicable to floating point types
        if (value & (T(1) << (BitLen - 1)))                 // The most significant bit is set --> negative
        {
            value |= T(T(0xFFFFFFFFFFFFFFFFULL) & ~((T(1) << BitLen) - 1));
        }
    }

    // No-op for unsigned types and for values that exactly fill their storage type.
    template <unsigned BitLen, typename T>
    static typename EnableIf<!static_cast<bool>(NumericTraits<T>::IsSigned) || ((sizeof(T) * 8) == BitLen)>::Type
    fixTwosComplement(T&) { }

    // Masks off host bits that do not fit into the wire width before encoding.
    template <unsigned BitLen, typename T>
    static typename EnableIf<((sizeof(T) * 8) > BitLen)>::Type
    clearExtraBits(T& value)
    {
        value &= (T(1) << BitLen) - 1;  // Signedness doesn't matter
    }

    // No-op when the value exactly fills its storage type.
    template <unsigned BitLen, typename T>
    static typename EnableIf<((sizeof(T) * 8) == BitLen)>::Type
    clearExtraBits(T&) { }

    // Compile-time sanity checks: T must be wide enough for BitLen, BitLen must fit into one
    // bit-stream read/write, and signed types must be at least two bits wide.
    template <unsigned BitLen, typename T>
    void validate()
    {
        StaticAssert<((sizeof(T) * 8) >= BitLen)>::check();
        StaticAssert<(BitLen <= BitStream::MaxBitsPerRW)>::check();
        StaticAssert<static_cast<bool>(NumericTraits<T>::IsSigned) ? (BitLen > 1) : true>::check();
    }

    // Raw byte-level access to the bit stream; defined out of line.
    int encodeBytesImpl(uint8_t* bytes, unsigned bitlen);
    int decodeBytesImpl(uint8_t* bytes, unsigned bitlen);

public:
    explicit ScalarCodec(BitStream& stream)
        : stream_(stream)
    { }

    // Encodes/decodes a scalar of type T occupying BitLen bits on the wire.
    // Return value semantics are those of encodeBytesImpl()/decodeBytesImpl().
    template <unsigned BitLen, typename T>
    int encode(const T value);

    template <unsigned BitLen, typename T>
    int decode(T& value);
};
// ----------------------------------------------------------------------------
/**
 * Encodes a scalar into the bit stream.
 * The value is first masked down to BitLen bits, then converted to wire byte order,
 * and finally written out as raw bytes.
 */
template <unsigned BitLen, typename T>
int ScalarCodec::encode(const T value)
{
    validate<BitLen, T>();

    // Union-based type punning: view the scalar as a raw byte array.
    union
    {
        T       scalar;
        uint8_t raw[sizeof(T)];
    } pun;

    pun.scalar = value;
    clearExtraBits<BitLen, T>(pun.scalar);
    convertByteOrder<BitLen>(pun.raw);
    return encodeBytesImpl(pun.raw, BitLen);
}
/**
 * Decodes a scalar from the bit stream.
 * Raw bytes are read into zero-initialized scratch storage, converted to host byte order,
 * and sign-extended when T is a signed type wider than BitLen.
 * The output argument is modified only on success (positive read result).
 */
template <unsigned BitLen, typename T>
int ScalarCodec::decode(T& value)
{
    validate<BitLen, T>();

    // Union-based type punning: scalar and raw-byte views of the same storage.
    union
    {
        T       scalar;
        uint8_t raw[sizeof(T)];
    } pun;
    pun.scalar = T();

    const int read_res = decodeBytesImpl(pun.raw, BitLen);
    if (read_res > 0)
    {
        convertByteOrder<BitLen>(pun.raw);
        fixTwosComplement<BitLen, T>(pun.scalar);
        value = pun.scalar;
    }
    return read_res;
}
}
#endif // UAVCAN_MARSHAL_SCALAR_CODEC_HPP_INCLUDED
@@ -0,0 +1,88 @@
/*
* Copyright (C) 2014 Pavel Kirienko <pavel.kirienko@gmail.com>
*/
#ifndef UAVCAN_MARSHAL_TYPE_UTIL_HPP_INCLUDED
#define UAVCAN_MARSHAL_TYPE_UTIL_HPP_INCLUDED
#include <uavcan/build_config.hpp>
#include <uavcan/util/templates.hpp>
#include <uavcan/util/comparison.hpp>
namespace uavcan
{
/**
 * Cast mode defines how an out-of-range scalar value is converted to its wire representation:
 * CastModeSaturate clamps the value to the nearest representable limit, whereas
 * CastModeTruncate simply discards the excess most significant bits.
 */
enum CastMode { CastModeSaturate, CastModeTruncate };
/**
* Read the specs to learn more about tail array optimizations.
*/
enum TailArrayOptimizationMode { TailArrayOptDisabled, TailArrayOptEnabled };
/**
* Compile-time: Returns the number of bits needed to represent an integer value.
*/
// Recursive case: each right shift strips one bit and adds one to the count,
// e.g. IntegerBitLen<5>::Result == 3, IntegerBitLen<1>::Result == 1.
template <unsigned long Num>
struct IntegerBitLen
{
    enum { Result = 1 + IntegerBitLen<(Num >> 1)>::Result };
};

// Base case: zero needs no bits; this terminates the recursion.
template <>
struct IntegerBitLen<0>
{
    enum { Result = 0 };
};
/**
* Compile-time: Returns the number of bytes needed to contain the given number of bits. Assumes 1 byte == 8 bit.
*/
template <unsigned long BitLen>
struct BitLenToByteLen
{
    // Round up to a whole number of bytes: 1..8 bits -> 1 byte, 9..16 -> 2, etc.
    enum { Result = (BitLen + 7) / 8 };
};
/**
* Compile-time: Helper for integer and float specialization classes. Returns the platform-specific storage type.
*/
// Fallback: a type without a nested StorageType typedef (i.e. already a plain
// platform type) is its own storage type.
template <typename T, typename Enable = void>
struct StorageType
{
    typedef T Type;
};

// SFINAE specialization: spec classes (e.g. IntegerSpec) expose their platform
// storage type via the nested StorageType typedef.
template <typename T>
struct StorageType<T, typename EnableIfType<typename T::StorageType>::Type>
{
    typedef typename T::StorageType Type;
};
/**
* Compile-time: Whether T is a primitive type on this platform.
*/
template <typename T>
class IsPrimitiveType
{
    // Classic sizeof-based SFINAE probe: overload resolution picks the Yes-returning
    // test() only when U::IsPrimitive exists and is true.
    typedef char Yes;
    struct No { Yes dummy[8]; };    // Deliberately larger than Yes so sizeof() can tell them apart

    template <typename U>
    static typename EnableIf<U::IsPrimitive, Yes>::Type test(int);

    template <typename>
    static No test(...);            // Fallback when the enabled overload is not viable

public:
    enum { Result = sizeof(test<T>(0)) == sizeof(Yes) };
};
/**
* Streams a given value into YAML string. Please see the specializations.
*/
template <typename T>
class UAVCAN_EXPORT YamlStreamer;
}
#endif // UAVCAN_MARSHAL_TYPE_UTIL_HPP_INCLUDED
@@ -0,0 +1,13 @@
/*
* Copyright (C) 2014 Pavel Kirienko <pavel.kirienko@gmail.com>
*/
#ifndef UAVCAN_MARSHAL_TYPES_HPP_INCLUDED
#define UAVCAN_MARSHAL_TYPES_HPP_INCLUDED
#include <uavcan/marshal/integer_spec.hpp>
#include <uavcan/marshal/float_spec.hpp>
#include <uavcan/marshal/array.hpp>
#include <uavcan/marshal/type_util.hpp>
#endif // UAVCAN_MARSHAL_TYPES_HPP_INCLUDED
@@ -0,0 +1,130 @@
/*
* Copyright (C) 2014 Pavel Kirienko <pavel.kirienko@gmail.com>
*/
#ifndef UAVCAN_NODE_ABSTRACT_NODE_HPP_INCLUDED
#define UAVCAN_NODE_ABSTRACT_NODE_HPP_INCLUDED
#include <uavcan/build_config.hpp>
#include <uavcan/dynamic_memory.hpp>
#include <uavcan/node/scheduler.hpp>
namespace uavcan
{
/**
* Abstract node class. If you're going to implement your own node class for your application,
* please inherit this class so it can be used with default publisher, subscriber, server, etc. classes.
* Normally you don't need to use it directly though - please refer to the class Node<> instead.
*/
class UAVCAN_EXPORT INode
{
public:
    virtual ~INode() { }

    // Services a concrete node implementation must provide.
    virtual IPoolAllocator& getAllocator() = 0;                 // Memory pool used by the library
    virtual Scheduler& getScheduler() = 0;
    virtual const Scheduler& getScheduler() const = 0;
    virtual void registerInternalFailure(const char* msg) = 0;  // Called on internal library failures

    // Convenience accessors that forward to the scheduler and its dispatcher.
    Dispatcher& getDispatcher() { return getScheduler().getDispatcher(); }
    const Dispatcher& getDispatcher() const { return getScheduler().getDispatcher(); }

    ISystemClock& getSystemClock() { return getScheduler().getSystemClock(); }
    MonotonicTime getMonotonicTime() const { return getScheduler().getMonotonicTime(); }
    UtcTime getUtcTime() const { return getScheduler().getUtcTime(); }

    /**
     * Returns the Node ID of this node.
     * If Node ID was not set yet, an invalid value will be returned.
     */
    NodeID getNodeID() const { return getScheduler().getDispatcher().getNodeID(); }

    /**
     * Sets the Node ID of this node.
     * Node ID can be assigned only once. This method returns true if the Node ID was successfully assigned,
     * otherwise it returns false.
     * As long as a valid Node ID is not set, the node will remain in passive mode.
     * Using a non-unicast Node ID puts the node into passive mode (as default).
     */
    bool setNodeID(NodeID nid)
    {
        return getScheduler().getDispatcher().setNodeID(nid);
    }

    /**
     * Whether the node is in passive mode, i.e. can't transmit anything to the bus.
     * Please read the specs to learn more.
     */
    bool isPassiveMode() const { return getScheduler().getDispatcher().isPassiveMode(); }

    /**
     * Same as @ref spin(MonotonicDuration), but the deadline is specified as an absolute time value
     * rather than duration.
     */
    int spin(MonotonicTime deadline)
    {
        return getScheduler().spin(deadline);
    }

    /**
     * Runs the node.
     * Normally your application should not block anywhere else.
     * Block inside this method forever or call it periodically.
     * This method returns 0 if no errors occurred, or a negative error code if something failed (see error.hpp).
     */
    int spin(MonotonicDuration duration)
    {
        return getScheduler().spin(getMonotonicTime() + duration);
    }

    /**
     * This method is designed for non-blocking applications.
     * Instead of blocking, it returns immediately once all available CAN frames and timer events are processed.
     * Note that this is unlike plain @ref spin(), which will strictly return when the deadline is reached,
     * even if there still are unprocessed events.
     * This method returns 0 if no errors occurred, or a negative error code if something failed (see error.hpp).
     */
    int spinOnce()
    {
        return getScheduler().spinOnce();
    }

    /**
     * This method allows to directly transmit a raw CAN frame circumventing the whole UAVCAN stack.
     * Mandatory parameters:
     *
     * @param frame         CAN frame to be transmitted.
     *
     * @param tx_deadline   The frame will be discarded if it could not be transmitted by this time.
     *
     * @param iface_mask    This bitmask allows to select what CAN interfaces this frame should go into.
     *                      Example:
     *                       - 1 - the frame will be sent only to iface 0.
     *                       - 4 - the frame will be sent only to iface 2.
     *                       - 3 - the frame will be sent to ifaces 0 and 1.
     *
     * Optional parameters:
     *
     * @param qos           Quality of service. Please refer to the CAN IO manager for details.
     *
     * @param flags         CAN IO flags. Please refer to the CAN driver API for details.
     */
    int injectTxFrame(const CanFrame& frame, MonotonicTime tx_deadline, uint8_t iface_mask,
                      CanTxQueue::Qos qos = CanTxQueue::Volatile,
                      CanIOFlags flags = 0)
    {
        return getDispatcher().getCanIOManager().send(frame, tx_deadline, MonotonicTime(), iface_mask, qos, flags);
    }

#if !UAVCAN_TINY
    /**
     * The @ref IRxFrameListener interface allows one to monitor all incoming CAN frames.
     * This feature can be used to implement multithreaded nodes, or to add secondary protocol support.
     */
    void removeRxFrameListener() { getDispatcher().removeRxFrameListener(); }
    void installRxFrameListener(IRxFrameListener* lst) { getDispatcher().installRxFrameListener(lst); }
#endif
};
}
#endif // UAVCAN_NODE_ABSTRACT_NODE_HPP_INCLUDED
@@ -0,0 +1,201 @@
/*
* Copyright (C) 2014 Pavel Kirienko <pavel.kirienko@gmail.com>
*/
#ifndef UAVCAN_NODE_GENERIC_PUBLISHER_HPP_INCLUDED
#define UAVCAN_NODE_GENERIC_PUBLISHER_HPP_INCLUDED
#include <uavcan/error.hpp>
#include <uavcan/node/abstract_node.hpp>
#include <uavcan/data_type.hpp>
#include <uavcan/node/global_data_type_registry.hpp>
#include <uavcan/debug.hpp>
#include <uavcan/transport/transfer_buffer.hpp>
#include <uavcan/transport/transfer_sender.hpp>
#include <uavcan/marshal/scalar_codec.hpp>
#include <uavcan/marshal/types.hpp>
namespace uavcan
{
/**
 * Non-template base of GenericPublisher; holds the transfer sender, TX timeout and node reference
 * so that the template layer stays thin.
 */
class GenericPublisherBase : Noncopyable
{
    TransferSender sender_;
    MonotonicDuration tx_timeout_;
    INode& node_;

protected:
    GenericPublisherBase(INode& node, MonotonicDuration tx_timeout,
                         MonotonicDuration max_transfer_interval)
        : sender_(node.getDispatcher(), max_transfer_interval)
        , tx_timeout_(tx_timeout)
        , node_(node)
    {
        // setTxTimeout() may clamp the requested timeout; see getMinTxTimeout()/getMaxTxTimeout().
        setTxTimeout(tx_timeout);
#if UAVCAN_DEBUG
        UAVCAN_ASSERT(getTxTimeout() == tx_timeout);  // Making sure default values are OK
#endif
    }

    ~GenericPublisherBase() { }

    bool isInited() const;

    // Registers the data type with the dispatcher; defined out of line.
    int doInit(DataTypeKind dtkind, const char* dtname, CanTxQueue::Qos qos);

    // Current time plus the configured TX timeout.
    MonotonicTime getTxDeadline() const;

    int genericPublish(const StaticTransferBufferImpl& buffer, TransferType transfer_type,
                       NodeID dst_node_id, TransferID* tid, MonotonicTime blocking_deadline);

    TransferSender& getTransferSender() { return sender_; }
    const TransferSender& getTransferSender() const { return sender_; }

public:
    // Bounds used to clamp the TX timeout value.
    static MonotonicDuration getMinTxTimeout() { return MonotonicDuration::fromUSec(200); }
    static MonotonicDuration getMaxTxTimeout() { return MonotonicDuration::fromMSec(60000); }

    MonotonicDuration getTxTimeout() const { return tx_timeout_; }
    void setTxTimeout(MonotonicDuration tx_timeout);

    /**
     * By default, an attempt to send a transfer from passive mode will result in an error @ref ErrPassive.
     * This option makes it possible to send anonymous transfers from passive mode.
     */
    void allowAnonymousTransfers()
    {
        sender_.allowAnonymousTransfers();
    }

    /**
     * Priority of outgoing transfers.
     */
    TransferPriority getPriority() const { return sender_.getPriority(); }
    void setPriority(const TransferPriority prio) { sender_.setPriority(prio); }

    INode& getNode() const { return node_; }
};
/**
* Generic publisher, suitable for messages and services.
* DataSpec - data type specification class
* DataStruct - instantiable class
*/
template <typename DataSpec, typename DataStruct>
class UAVCAN_EXPORT GenericPublisher : public GenericPublisherBase
{
    // Zero-length buffer used when the data type carries no payload (MaxBitLen == 0).
    struct ZeroTransferBuffer : public StaticTransferBufferImpl
    {
        ZeroTransferBuffer() : StaticTransferBufferImpl(UAVCAN_NULLPTR, 0) { }
    };

    // Compile-time selection of a stack buffer sized exactly for the serialized message.
    typedef typename Select<DataStruct::MaxBitLen == 0,
                            ZeroTransferBuffer,
                            StaticTransferBuffer<BitLenToByteLen<DataStruct::MaxBitLen>::Result> >::Result Buffer;

    enum
    {
        // Messages are best-effort (Volatile); service transfers must not be dropped (Persistent).
        Qos = (DataTypeKind(DataSpec::DataTypeKind) == DataTypeKindMessage) ?
              CanTxQueue::Volatile : CanTxQueue::Persistent
    };

    int checkInit();

    int doEncode(const DataStruct& message, ITransferBuffer& buffer) const;

    int genericPublish(const DataStruct& message, TransferType transfer_type, NodeID dst_node_id,
                       TransferID* tid, MonotonicTime blocking_deadline);

public:
    /**
     * @param max_transfer_interval Maximum expected time interval between subsequent publications. Leave default.
     */
    GenericPublisher(INode& node, MonotonicDuration tx_timeout,
                     MonotonicDuration max_transfer_interval = TransferSender::getDefaultMaxTransferInterval())
        : GenericPublisherBase(node, tx_timeout, max_transfer_interval)
    { }

    ~GenericPublisher() { }

    /**
     * Init method can be called prior first publication, but it's not necessary
     * because the publisher can be automatically initialized ad-hoc.
     */
    int init()
    {
        return checkInit();
    }

    /**
     * This overload allows to set the priority; otherwise it's the same.
     */
    int init(TransferPriority priority)
    {
        setPriority(priority);
        return checkInit();
    }

    // Publishes with an auto-assigned Transfer ID.
    int publish(const DataStruct& message, TransferType transfer_type, NodeID dst_node_id,
                MonotonicTime blocking_deadline = MonotonicTime())
    {
        return genericPublish(message, transfer_type, dst_node_id, UAVCAN_NULLPTR, blocking_deadline);
    }

    // Publishes with an explicitly specified Transfer ID.
    int publish(const DataStruct& message, TransferType transfer_type, NodeID dst_node_id, TransferID tid,
                MonotonicTime blocking_deadline = MonotonicTime())
    {
        return genericPublish(message, transfer_type, dst_node_id, &tid, blocking_deadline);
    }
};
// ----------------------------------------------------------------------------
/**
 * Lazy initialization helper: a no-op once the publisher is initialized,
 * otherwise registers the data type with the appropriate QoS.
 */
template <typename DataSpec, typename DataStruct>
int GenericPublisher<DataSpec, DataStruct>::checkInit()
{
    return isInited() ? 0
                      : doInit(DataTypeKind(DataSpec::DataTypeKind),
                               DataSpec::getDataTypeFullName(),
                               CanTxQueue::Qos(Qos));
}
/**
 * Serializes the message into the given transfer buffer.
 * Returns the (positive) encode result on success, or -ErrInvalidMarshalData on failure -
 * which should never happen for a well-formed autogenerated type.
 */
template <typename DataSpec, typename DataStruct>
int GenericPublisher<DataSpec, DataStruct>::doEncode(const DataStruct& message, ITransferBuffer& buffer) const
{
    BitStream bit_stream(buffer);
    ScalarCodec codec(bit_stream);
    const int result = DataStruct::encode(message, codec);
    if (result > 0)
    {
        return result;
    }
    UAVCAN_ASSERT(0); // Impossible, internal error
    return -ErrInvalidMarshalData;
}
/**
 * Full publication pipeline: lazy init, serialization into a stack buffer,
 * then hand-off to the non-template base for actual transmission.
 * Returns a negative error code from whichever stage fails first.
 */
template <typename DataSpec, typename DataStruct>
int GenericPublisher<DataSpec, DataStruct>::genericPublish(const DataStruct& message, TransferType transfer_type,
                                                           NodeID dst_node_id, TransferID* tid,
                                                           MonotonicTime blocking_deadline)
{
    const int init_res = checkInit();
    if (init_res < 0)
    {
        return init_res;
    }

    Buffer buf;
    const int encode_res = doEncode(message, buf);
    return (encode_res < 0)
           ? encode_res
           : GenericPublisherBase::genericPublish(buf, transfer_type, dst_node_id, tid, blocking_deadline);
}
}
#endif // UAVCAN_NODE_GENERIC_PUBLISHER_HPP_INCLUDED
@@ -0,0 +1,299 @@
/*
* Copyright (C) 2014 Pavel Kirienko <pavel.kirienko@gmail.com>
*/
#ifndef UAVCAN_NODE_GENERIC_SUBSCRIBER_HPP_INCLUDED
#define UAVCAN_NODE_GENERIC_SUBSCRIBER_HPP_INCLUDED
#include <uavcan/error.hpp>
#include <uavcan/node/abstract_node.hpp>
#include <uavcan/data_type.hpp>
#include <uavcan/node/global_data_type_registry.hpp>
#include <uavcan/util/templates.hpp>
#include <uavcan/util/lazy_constructor.hpp>
#include <uavcan/debug.hpp>
#include <uavcan/transport/transfer_listener.hpp>
#include <uavcan/marshal/scalar_codec.hpp>
#include <uavcan/marshal/types.hpp>
namespace uavcan
{
/**
* This class extends the data structure with extra information obtained from the transport layer,
* such as Source Node ID, timestamps, Transfer ID, index of the interface this transfer was picked up from, etc.
*
* PLEASE NOTE that since this class inherits the data structure type, subscription callbacks can accept either
* object of this class or the data structure type directly if the extra information is not needed.
*
* For example, both of these callbacks can be used with the same data structure 'Foo':
* void first(const ReceivedDataStructure<Foo>& msg);
* void second(const Foo& msg);
* In the latter case, an implicit cast will happen before the callback is invoked.
*
* This class is not copyable because it holds a reference to a stack-allocated transfer descriptor object.
* You can slice cast it to the underlying data type though, which would be copyable:
* DataType dt = rds; // where rds is of type ReceivedDataStructure<DataType>
* // dt is now copyable
*/
template <typename DataType_>
class UAVCAN_EXPORT ReceivedDataStructure : public DataType_, Noncopyable
{
    const IncomingTransfer* const _transfer_;   ///< Such weird name is necessary to avoid clashing with DataType fields

    // Forwards the call to the transfer object if one is attached, otherwise returns a
    // default-constructed value. Default construction happens only for objects created
    // with the no-argument constructor (no transfer attached).
    template <typename Ret, Ret(IncomingTransfer::*Fun) () const>
    Ret safeget() const
    {
        if (_transfer_ == UAVCAN_NULLPTR)
        {
            return Ret();
        }
        return (_transfer_->*Fun)();
    }

protected:
    ReceivedDataStructure()
        : _transfer_(UAVCAN_NULLPTR)
    { }

    ReceivedDataStructure(const IncomingTransfer* arg_transfer)
        : _transfer_(arg_transfer)
    {
        UAVCAN_ASSERT(arg_transfer != UAVCAN_NULLPTR);
    }

public:
    typedef DataType_ DataType;

    // Transport-layer metadata accessors; all forward to the attached IncomingTransfer via safeget().
    MonotonicTime getMonotonicTimestamp() const
    {
        return safeget<MonotonicTime, &IncomingTransfer::getMonotonicTimestamp>();
    }
    UtcTime getUtcTimestamp()        const { return safeget<UtcTime, &IncomingTransfer::getUtcTimestamp>(); }
    TransferPriority getPriority()   const { return safeget<TransferPriority, &IncomingTransfer::getPriority>(); }
    TransferType getTransferType()   const { return safeget<TransferType, &IncomingTransfer::getTransferType>(); }
    TransferID getTransferID()       const { return safeget<TransferID, &IncomingTransfer::getTransferID>(); }
    NodeID getSrcNodeID()            const { return safeget<NodeID, &IncomingTransfer::getSrcNodeID>(); }
    uint8_t getIfaceIndex()          const { return safeget<uint8_t, &IncomingTransfer::getIfaceIndex>(); }
    bool isAnonymousTransfer()       const { return safeget<bool, &IncomingTransfer::isAnonymousTransfer>(); }
};
/**
* This operator neatly prints the data structure prepended with extra data from the transport layer.
* The extra data will be represented as YAML comment.
*/
template <typename Stream, typename DataType>
static Stream& operator<<(Stream& s, const ReceivedDataStructure<DataType>& rds)
{
    // Transport metadata first, formatted as a YAML comment line...
    s << "# Received struct ts_m=" << rds.getMonotonicTimestamp()
      << " ts_utc=" << rds.getUtcTimestamp()
      << " snid=" << int(rds.getSrcNodeID().get()) << "\n";
    // ...then the payload, via the data type's own streaming operator.
    s << static_cast<const DataType&>(rds);
    return s;
}
/**
 * Non-template base of GenericSubscriber; keeps the node reference and decode-failure
 * counter, and hosts the out-of-line start/stop helpers.
 */
class GenericSubscriberBase : Noncopyable
{
protected:
    INode& node_;
    uint32_t failure_count_;    // Incremented on every failed attempt to decode a received message

    explicit GenericSubscriberBase(INode& node)
        : node_(node)
        , failure_count_(0)
    { }

    ~GenericSubscriberBase() { }

    // Registers/unregisters the listener with the dispatcher; defined out of line.
    int genericStart(TransferListener* listener, bool (Dispatcher::*registration_method)(TransferListener*));

    void stop(TransferListener* listener);

public:
    /**
     * Returns the number of failed attempts to decode received message. Generally, a failed attempt means either:
     * - Transient failure in the transport layer.
     * - Incompatible data types.
     */
    uint32_t getFailureCount() const { return failure_count_; }

    INode& getNode() const { return node_; }
};
/**
* Please note that the reference passed to the RX callback points to a stack-allocated object, which means
* that it gets invalidated shortly after the callback returns.
*/
template <typename DataSpec, typename DataStruct, typename TransferListenerType>
class UAVCAN_EXPORT GenericSubscriber : public GenericSubscriberBase
{
    typedef GenericSubscriber<DataSpec, DataStruct, TransferListenerType> SelfType;

    // We need to break the inheritance chain here to implement lazy initialization
    class TransferForwarder : public TransferListenerType
    {
        SelfType& obj_;

        // Forwards each completed transfer back to the owning subscriber for decoding.
        void handleIncomingTransfer(IncomingTransfer& transfer) override
        {
            obj_.handleIncomingTransfer(transfer);
        }

    public:
        TransferForwarder(SelfType& obj,
                          const DataTypeDescriptor& data_type,
                          uint16_t max_buffer_size,
                          IPoolAllocator& allocator) :
            TransferListenerType(obj.node_.getDispatcher().getTransferPerfCounter(),
                                 data_type,
                                 max_buffer_size,
                                 allocator),
            obj_(obj)
        { }
    };

    // Constructed on first start (see checkInit()); remains alive until destruction.
    LazyConstructor<TransferForwarder> forwarder_;

    int checkInit();

    void handleIncomingTransfer(IncomingTransfer& transfer);

    int genericStart(bool (Dispatcher::*registration_method)(TransferListener*));

protected:
    // Thin wrapper exposing both ReceivedDataStructure constructors to this template.
    struct ReceivedDataStructureSpec : public ReceivedDataStructure<DataStruct>
    {
        ReceivedDataStructureSpec() { }
        ReceivedDataStructureSpec(const IncomingTransfer* arg_transfer) :
            ReceivedDataStructure<DataStruct>(arg_transfer)
        { }
    };

    explicit GenericSubscriber(INode& node) : GenericSubscriberBase(node)
    { }

    virtual ~GenericSubscriber() { stop(); }

    // Invoked for every successfully decoded message; implemented by the concrete subscriber.
    virtual void handleReceivedDataStruct(ReceivedDataStructure<DataStruct>&) = 0;

    int startAsMessageListener()
    {
        UAVCAN_TRACE("GenericSubscriber", "Start as message listener; dtname=%s", DataSpec::getDataTypeFullName());
        return genericStart(&Dispatcher::registerMessageListener);
    }

    int startAsServiceRequestListener()
    {
        UAVCAN_TRACE("GenericSubscriber", "Start as service request listener; dtname=%s",
                     DataSpec::getDataTypeFullName());
        return genericStart(&Dispatcher::registerServiceRequestListener);
    }

    int startAsServiceResponseListener()
    {
        UAVCAN_TRACE("GenericSubscriber", "Start as service response listener; dtname=%s",
                     DataSpec::getDataTypeFullName());
        return genericStart(&Dispatcher::registerServiceResponseListener);
    }

    /**
     * By default, anonymous transfers will be ignored.
     * This option allows to enable reception of anonymous transfers.
     * NOTE(review): dereferences forwarder_ - presumably only valid after a successful start; confirm.
     */
    void allowAnonymousTransfers()
    {
        forwarder_->allowAnonymousTransfers();
    }

    /**
     * Terminate the subscription.
     * Dispatcher core will remove this instance from the subscribers list.
     */
    void stop()
    {
        UAVCAN_TRACE("GenericSubscriber", "Stop; dtname=%s", DataSpec::getDataTypeFullName());
        GenericSubscriberBase::stop(forwarder_);
    }

    TransferListenerType* getTransferListener() { return forwarder_; }
};
// ----------------------------------------------------------------------------
/*
* GenericSubscriber
*/
template <typename DataSpec, typename DataStruct, typename TransferListenerType>
int GenericSubscriber<DataSpec, DataStruct, TransferListenerType>::checkInit()
{
    // Already initialized - nothing to do.
    if (forwarder_)
    {
        return 0;
    }
    // Freeze the registry so the data type configuration can't change under a running node,
    // then look this data type up; it must have been registered beforehand.
    GlobalDataTypeRegistry::instance().freeze();
    const DataTypeDescriptor* const descr =
        GlobalDataTypeRegistry::instance().find(DataTypeKind(DataSpec::DataTypeKind), DataSpec::getDataTypeFullName());
    if (descr == UAVCAN_NULLPTR)
    {
        UAVCAN_TRACE("GenericSubscriber", "Type [%s] is not registered", DataSpec::getDataTypeFullName());
        return -ErrUnknownDataType;
    }
    // Lazily construct the transfer forwarder with a buffer big enough for the largest message.
    static const uint16_t MaxBufferSize = BitLenToByteLen<DataStruct::MaxBitLen>::Result;
    forwarder_.template construct<SelfType&, const DataTypeDescriptor&, uint16_t, IPoolAllocator&>
        (*this, *descr, MaxBufferSize, node_.getAllocator());
    return 0;
}
template <typename DataSpec, typename DataStruct, typename TransferListenerType>
void GenericSubscriber<DataSpec, DataStruct, TransferListenerType>::handleIncomingTransfer(IncomingTransfer& transfer)
{
    // Stack-allocated; the reference handed to the callback is invalidated once this returns.
    ReceivedDataStructureSpec rx_struct(&transfer);
    /*
     * Decoding into the temporary storage
     */
    BitStream bitstream(transfer);
    ScalarCodec codec(bitstream);
    const int decode_res = DataStruct::decode(rx_struct, codec);
    // We don't need the data anymore, the memory can be reused from the callback:
    transfer.release();
    if (decode_res <= 0)
    {
        // Decoding failure is counted but not propagated - the transfer is simply dropped.
        UAVCAN_TRACE("GenericSubscriber", "Unable to decode the message [%i] [%s]",
                     decode_res, DataSpec::getDataTypeFullName());
        failure_count_++;
        node_.getDispatcher().getTransferPerfCounter().addError();
        return;
    }
    /*
     * Invoking the callback
     */
    handleReceivedDataStruct(rx_struct);
}
/**
 * Common start path for all listener kinds: perform lazy initialization, then register
 * the forwarder with the dispatcher using the supplied registration method.
 */
template <typename DataSpec, typename DataStruct, typename TransferListenerType>
int GenericSubscriber<DataSpec, DataStruct, TransferListenerType>::
genericStart(bool (Dispatcher::*registration_method)(TransferListener*))
{
    const int init_res = checkInit();
    if (init_res >= 0)
    {
        return GenericSubscriberBase::genericStart(forwarder_, registration_method);
    }
    UAVCAN_TRACE("GenericSubscriber", "Initialization failure [%s]", DataSpec::getDataTypeFullName());
    return init_res;
}
}
#endif // UAVCAN_NODE_GENERIC_SUBSCRIBER_HPP_INCLUDED
@@ -0,0 +1,241 @@
/*
* Copyright (C) 2014 Pavel Kirienko <pavel.kirienko@gmail.com>
*/
#ifndef UAVCAN_NODE_GLOBAL_DATA_TYPE_REGISTRY_HPP_INCLUDED
#define UAVCAN_NODE_GLOBAL_DATA_TYPE_REGISTRY_HPP_INCLUDED
#include <cassert>
#include <uavcan/error.hpp>
#include <uavcan/std.hpp>
#include <uavcan/data_type.hpp>
#include <uavcan/util/templates.hpp>
#include <uavcan/util/linked_list.hpp>
#if UAVCAN_DEBUG
# include <uavcan/debug.hpp>
#endif
namespace uavcan
{
/**
 * This singleton is shared among all existing node instances. It is instantiated automatically
 * when the C++ runtime executes constructors before main().
 *
 * Its purpose is to keep the list of all UAVCAN data types known and used by this application.
 *
 * Also, the mapping between Data Type name and its Data Type ID is also stored in this singleton.
 * UAVCAN data types with default Data Type ID that are autogenerated by the libuavcan DSDL compiler
 * are registered automatically before main() (refer to the generated headers to see how exactly).
 * Data types that don't have a default Data Type ID must be registered manually using the methods
 * of this class (read the method documentation).
 *
 * Attempt to use a data type that was not registered with this singleton (e.g. publish, subscribe,
 * perform a service call etc.) will fail with an error code @ref ErrUnknownDataType.
 */
class UAVCAN_EXPORT GlobalDataTypeRegistry : Noncopyable
{
struct Entry : public LinkedListNode<Entry>
{
DataTypeDescriptor descriptor;
Entry() { }
Entry(DataTypeKind kind, DataTypeID id, const DataTypeSignature& signature, const char* name)
: descriptor(kind, id, signature, name)
{ }
};
struct EntryInsertionComparator
{
const DataTypeID id;
explicit EntryInsertionComparator(const Entry* dtd)
: id((dtd == UAVCAN_NULLPTR) ? DataTypeID() : dtd->descriptor.getID())
{
UAVCAN_ASSERT(dtd != UAVCAN_NULLPTR);
}
bool operator()(const Entry* entry) const
{
UAVCAN_ASSERT(entry != UAVCAN_NULLPTR);
return entry->descriptor.getID() > id;
}
};
public:
/**
* Result of data type registration
*/
enum RegistrationResult
{
RegistrationResultOk, ///< Success, data type is now registered and can be used.
RegistrationResultCollision, ///< Data type name or ID is not unique.
RegistrationResultInvalidParams, ///< Invalid input parameters.
RegistrationResultFrozen ///< Data Type Registery has been frozen and can't be modified anymore.
};
private:
typedef LinkedListRoot<Entry> List;
mutable List msgs_;
mutable List srvs_;
bool frozen_;
GlobalDataTypeRegistry() : frozen_(false) { }
List* selectList(DataTypeKind kind) const;
RegistrationResult remove(Entry* dtd);
RegistrationResult registImpl(Entry* dtd);
public:
/**
* Returns the reference to the singleton.
*/
static GlobalDataTypeRegistry& instance();
/**
* Register a data type 'Type' with ID 'id'.
* If this data type was registered earlier, its old registration will be overridden.
* This method will fail if the data type registry is frozen.
*
* @tparam Type Autogenerated UAVCAN data type to register. Data types are generated by the
* libuavcan DSDL compiler from DSDL definitions.
*
* @param id Data Type ID for this data type.
*/
template <typename Type>
RegistrationResult registerDataType(DataTypeID id);
/**
* Data Type registry needs to be frozen before a node instance can use it in
* order to prevent accidental change in data type configuration on a running
* node.
*
* This method will be called automatically by the node during start up, so
* the user does not need to call it from the application manually. Subsequent
* calls will not have any effect.
*
* Once frozen, data type registry can't be unfrozen.
*/
void freeze();
bool isFrozen() const { return frozen_; }
/**
* Finds data type descriptor by full data type name, e.g. "uavcan.protocol.NodeStatus".
* Messages are searched first, then services.
* Returns null pointer if the data type with this name is not registered.
* @param name Full data type name
* @return Descriptor for this data type or null pointer if not found
*/
const DataTypeDescriptor* find(const char* name) const;
/**
* Finds data type descriptor by full data type name, e.g. "uavcan.protocol.NodeStatus", and data type kind.
* Returns null pointer if the data type with this name is not registered.
* @param kind Data Type Kind - message or service
* @param name Full data type name
* @return Descriptor for this data type or null pointer if not found
*/
const DataTypeDescriptor* find(DataTypeKind kind, const char* name) const;
/**
* Finds data type descriptor by data type ID.
* Returns null pointer if the data type with this ID is not registered.
* @param kind Data Type Kind - message or service
* @param dtid Data Type ID
* @return Descriptor for this data type or null pointer if not found
*/
const DataTypeDescriptor* find(DataTypeKind kind, DataTypeID dtid) const;
/**
* Returns the number of registered message types.
*/
unsigned getNumMessageTypes() const { return msgs_.getLength(); }
/**
* Returns the number of registered service types.
*/
unsigned getNumServiceTypes() const { return srvs_.getLength(); }
#if UAVCAN_DEBUG
/// Required for unit testing
    // Unfreezes the registry and unregisters every message and service type.
    // Debug/unit-test builds only; production code can never unfreeze (see freeze()).
    void reset()
    {
        UAVCAN_TRACE("GlobalDataTypeRegistry", "Reset; was frozen: %i, num msgs: %u, num srvs: %u",
                     int(frozen_), getNumMessageTypes(), getNumServiceTypes());
        frozen_ = false;
        // Drain both intrusive lists by repeatedly unlinking the head entry
        while (msgs_.get())
        {
            msgs_.remove(msgs_.get());
        }
        while (srvs_.get())
        {
            srvs_.remove(srvs_.get());
        }
    }
#endif
};
/**
 * This class is used by autogenerated data types to register with the
 * data type registry automatically before main() is called. Note that
 * if a generated data type header file is not included by any translation
 * unit of the application, the data type will not be registered.
 *
 * Data type needs to have a default ID to be registrable by this class.
 */
template <typename Type>
struct UAVCAN_EXPORT DefaultDataTypeRegistrator
{
    DefaultDataTypeRegistrator()
    {
#if !UAVCAN_NO_GLOBAL_DATA_TYPE_REGISTRY
        // Runs during static initialization, registering Type under its compile-time
        // default data type ID. Failure here (name/ID collision, or a registry that is
        // already frozen) is an unrecoverable configuration error, hence fatal.
        const GlobalDataTypeRegistry::RegistrationResult res =
            GlobalDataTypeRegistry::instance().registerDataType<Type>(Type::DefaultDataTypeID);
        if (res != GlobalDataTypeRegistry::RegistrationResultOk)
        {
            handleFatalError("Type reg failed");
        }
#endif
    }
};
// ----------------------------------------------------------------------------
/*
* GlobalDataTypeRegistry
*/
// Registers (or re-registers) data type 'Type' under the given ID.
// Returns RegistrationResultFrozen once the registry has been frozen by a started node.
template <typename Type>
GlobalDataTypeRegistry::RegistrationResult GlobalDataTypeRegistry::registerDataType(DataTypeID id)
{
    if (isFrozen())
    {
        return RegistrationResultFrozen;
    }
    // One static Entry per template instantiation of Type; it lives for the whole
    // program, so the registry's intrusive list can link it without heap allocation.
    static Entry entry;
    {
        // Unlink first in case this Type was registered earlier (re-registration path)
        const RegistrationResult remove_res = remove(&entry);
        if (remove_res != RegistrationResultOk)
        {
            return remove_res;
        }
    }
    // We can't just overwrite the entry itself because it's noncopyable
    entry.descriptor = DataTypeDescriptor(DataTypeKind(Type::DataTypeKind), id,
    Type::getDataTypeSignature(), Type::getDataTypeFullName());
    {
        // Second remove() keeps the list consistent after the descriptor was mutated above
        const RegistrationResult remove_res = remove(&entry);
        if (remove_res != RegistrationResultOk)
        {
            return remove_res;
        }
    }
    return registImpl(&entry);
}
}
#endif // UAVCAN_NODE_GLOBAL_DATA_TYPE_REGISTRY_HPP_INCLUDED
@@ -0,0 +1,319 @@
/*
* Copyright (C) 2014 Pavel Kirienko <pavel.kirienko@gmail.com>
*/
#ifndef UAVCAN_NODE_NODE_HPP_INCLUDED
#define UAVCAN_NODE_NODE_HPP_INCLUDED
#include <cassert>
#include <uavcan/error.hpp>
#include <uavcan/build_config.hpp>
#include <uavcan/node/abstract_node.hpp>
// High-level functionality available by default
#include <uavcan/protocol/node_status_provider.hpp>
#if !UAVCAN_TINY
# include <uavcan/protocol/data_type_info_provider.hpp>
# include <uavcan/protocol/logger.hpp>
# include <uavcan/protocol/restart_request_server.hpp>
# include <uavcan/protocol/transport_stats_provider.hpp>
#endif
#if !defined(UAVCAN_CPP_VERSION) || !defined(UAVCAN_CPP11)
# error UAVCAN_CPP_VERSION
#endif
namespace uavcan
{
/**
 * This is the top-level node API.
 * A custom node class can be implemented if needed, in which case it shall inherit INode.
 *
 * @tparam MemPoolSize Size of memory pool for this node, in bytes.
 * Please refer to the documentation for details.
 * If this value is zero, the constructor will accept a reference to user-provided allocator.
 */
template <std::size_t MemPoolSize = 0>
class UAVCAN_EXPORT Node : public INode
{
    // Compile-time selection of the allocator type depending on MemPoolSize
    typedef typename
    Select<(MemPoolSize > 0),
    PoolAllocator<MemPoolSize, MemPoolBlockSize>, // If pool size is specified, use default allocator
    IPoolAllocator& // Otherwise use reference to user-provided allocator
    >::Result Allocator;
    Allocator pool_allocator_;
    Scheduler scheduler_;
    // Standard protocol services that every (non-tiny) node provides out of the box
    NodeStatusProvider proto_nsp_;
#if !UAVCAN_TINY
    DataTypeInfoProvider proto_dtp_;
    Logger proto_logger_;
    RestartRequestServer proto_rrs_;
    TransportStatsProvider proto_tsp_;
#endif
    uint64_t internal_failure_cnt_;
    bool started_;
    // Shared by both constructors; keeps the node in the "not started" state
    void commonInit()
    {
        internal_failure_cnt_ = 0;
        started_ = false;
    }
protected:
    // Counts the failure and (unless UAVCAN_TINY) reports it via the standard log service
    virtual void registerInternalFailure(const char* msg) override
    {
        internal_failure_cnt_++;
        UAVCAN_TRACE("Node", "Internal failure: %s", msg);
#if UAVCAN_TINY
        (void)msg;
#else
        (void)getLogger().log(protocol::debug::LogLevel::ERROR, "UAVCAN", msg);
#endif
    }
public:
    /**
     * This overload is only valid if MemPoolSize > 0.
     */
    Node(ICanDriver& can_driver,
    ISystemClock& system_clock) :
    scheduler_(can_driver, pool_allocator_, system_clock),
    proto_nsp_(*this)
#if !UAVCAN_TINY
    , proto_dtp_(*this)
    , proto_logger_(*this)
    , proto_rrs_(*this)
    , proto_tsp_(*this)
#endif
    {
        commonInit();
    }
    /**
     * This overload is only valid if MemPoolSize == 0.
     */
    Node(ICanDriver& can_driver,
    ISystemClock& system_clock,
    IPoolAllocator& allocator) :
    pool_allocator_(allocator),
    scheduler_(can_driver, pool_allocator_, system_clock),
    proto_nsp_(*this)
#if !UAVCAN_TINY
    , proto_dtp_(*this)
    , proto_logger_(*this)
    , proto_rrs_(*this)
    , proto_tsp_(*this)
#endif
    {
        commonInit();
    }
    virtual typename RemoveReference<Allocator>::Type& getAllocator() override { return pool_allocator_; }
    virtual Scheduler& getScheduler() override { return scheduler_; }
    virtual const Scheduler& getScheduler() const override { return scheduler_; }
    // The spin overloads refuse to run until start() has succeeded (-ErrNotInited otherwise)
    int spin(MonotonicTime deadline)
    {
        if (started_)
        {
            return INode::spin(deadline);
        }
        return -ErrNotInited;
    }
    int spin(MonotonicDuration duration)
    {
        if (started_)
        {
            return INode::spin(duration);
        }
        return -ErrNotInited;
    }
    int spinOnce()
    {
        if (started_)
        {
            return INode::spinOnce();
        }
        return -ErrNotInited;
    }
    bool isStarted() const { return started_; }
    uint64_t getInternalFailureCount() const { return internal_failure_cnt_; }
    /**
     * Starts the node and publishes uavcan.protocol.NodeStatus immediately.
     * Does not do anything if the node is already started.
     * Once started, the node can't stop.
     * If the node failed to start up, it's recommended to destroy the current node instance and start over.
     * Returns negative error code.
     * @param node_status_transfer_priority Transfer priority that will be used for outgoing NodeStatus messages.
     * Normal priority is used by default.
     */
    int start(const TransferPriority node_status_transfer_priority = TransferPriority::Default);
    /**
     * Gets/sets the node name, e.g. "com.example.product_name". The node name can be set only once.
     * The name must be set before the node is started, otherwise the node will refuse to start up.
     */
    const NodeStatusProvider::NodeName& getName() const { return proto_nsp_.getName(); }
    void setName(const NodeStatusProvider::NodeName& name) { proto_nsp_.setName(name); }
    /**
     * Node health code helpers.
     */
    void setHealthOk() { proto_nsp_.setHealthOk(); }
    void setHealthWarning() { proto_nsp_.setHealthWarning(); }
    void setHealthError() { proto_nsp_.setHealthError(); }
    void setHealthCritical() { proto_nsp_.setHealthCritical(); }
    /**
     * Node mode code helpers.
     * Note that INITIALIZATION is the default mode; the application has to manually set it to OPERATIONAL.
     */
    void setModeOperational() { proto_nsp_.setModeOperational(); }
    void setModeInitialization() { proto_nsp_.setModeInitialization(); }
    void setModeMaintenance() { proto_nsp_.setModeMaintenance(); }
    void setModeSoftwareUpdate() { proto_nsp_.setModeSoftwareUpdate(); }
    // Sets OFFLINE mode and immediately force-publishes NodeStatus (e.g. right before shutdown)
    void setModeOfflineAndPublish()
    {
        proto_nsp_.setModeOffline();
        (void)proto_nsp_.forcePublish();
    }
    /**
     * Updates the vendor-specific status code.
     */
    void setVendorSpecificStatusCode(NodeStatusProvider::VendorSpecificStatusCode code)
    {
        proto_nsp_.setVendorSpecificStatusCode(code);
    }
    /**
     * Gets/sets the node version information.
     */
    void setSoftwareVersion(const protocol::SoftwareVersion& version) { proto_nsp_.setSoftwareVersion(version); }
    void setHardwareVersion(const protocol::HardwareVersion& version) { proto_nsp_.setHardwareVersion(version); }
    const protocol::SoftwareVersion& getSoftwareVersion() const { return proto_nsp_.getSoftwareVersion(); }
    const protocol::HardwareVersion& getHardwareVersion() const { return proto_nsp_.getHardwareVersion(); }
    NodeStatusProvider& getNodeStatusProvider() { return proto_nsp_; }
#if !UAVCAN_TINY
    /**
     * Restart handler can be installed to handle external node restart requests (highly recommended).
     */
    void setRestartRequestHandler(IRestartRequestHandler* handler) { proto_rrs_.setHandler(handler); }
    RestartRequestServer& getRestartRequestServer() { return proto_rrs_; }
    /**
     * Node logging.
     * Logging calls are passed directly into the @ref Logger instance.
     * Type safe log formatting is supported only in C++11 mode.
     * @{
     */
#if UAVCAN_CPP_VERSION >= UAVCAN_CPP11
    template <typename... Args>
    inline void logDebug(const char* source, const char* format, Args... args)
    {
        (void)proto_logger_.logDebug(source, format, args...);
    }
    template <typename... Args>
    inline void logInfo(const char* source, const char* format, Args... args)
    {
        (void)proto_logger_.logInfo(source, format, args...);
    }
    template <typename... Args>
    inline void logWarning(const char* source, const char* format, Args... args)
    {
        (void)proto_logger_.logWarning(source, format, args...);
    }
    template <typename... Args>
    inline void logError(const char* source, const char* format, Args... args)
    {
        (void)proto_logger_.logError(source, format, args...);
    }
#else
    void logDebug(const char* source, const char* text) { (void)proto_logger_.logDebug(source, text); }
    void logInfo(const char* source, const char* text) { (void)proto_logger_.logInfo(source, text); }
    void logWarning(const char* source, const char* text) { (void)proto_logger_.logWarning(source, text); }
    void logError(const char* source, const char* text) { (void)proto_logger_.logError(source, text); }
#endif
    /**
     * @}
     */
    /**
     * Use this method to configure logging.
     */
    Logger& getLogger() { return proto_logger_; }
#endif // UAVCAN_TINY
};
// ----------------------------------------------------------------------------
// Starts the node: freezes the data type registry, then starts the standard protocol
// services in order. Returns 0 if already started, negative error code on failure.
// On failure started_ stays false, so the node remains unusable (see class docs).
template <std::size_t MemPoolSize_>
int Node<MemPoolSize_>::start(const TransferPriority priority)
{
    if (started_)
    {
        return 0;
    }
    // Freezing guarantees the data type configuration cannot change on a running node;
    // subsequent freeze() calls are no-ops.
    GlobalDataTypeRegistry::instance().freeze();
    int res = 0;
    // NodeStatus must come up first so the node announces itself immediately
    res = proto_nsp_.startAndPublish(priority);
    if (res < 0)
    {
        goto fail;
    }
#if !UAVCAN_TINY
    res = proto_dtp_.start();
    if (res < 0)
    {
        goto fail;
    }
    res = proto_logger_.init();
    if (res < 0)
    {
        goto fail;
    }
    res = proto_rrs_.start();
    if (res < 0)
    {
        goto fail;
    }
    res = proto_tsp_.start();
    if (res < 0)
    {
        goto fail;
    }
#endif
    started_ = res >= 0;
    return res;
fail:
    UAVCAN_ASSERT(res < 0);
    return res;
}
}
#endif // UAVCAN_NODE_NODE_HPP_INCLUDED
@@ -0,0 +1,89 @@
/*
* Copyright (C) 2014 Pavel Kirienko <pavel.kirienko@gmail.com>
*/
#ifndef UAVCAN_NODE_PUBLISHER_HPP_INCLUDED
#define UAVCAN_NODE_PUBLISHER_HPP_INCLUDED
#include <uavcan/node/generic_publisher.hpp>
namespace uavcan
{
/**
 * Use this class to publish messages to the bus (broadcast, unicast, or both).
 *
 * @tparam DataType_ Message data type
 */
template <typename DataType_>
class UAVCAN_EXPORT Publisher : protected GenericPublisher<DataType_, DataType_>
{
    typedef GenericPublisher<DataType_, DataType_> BaseType;
public:
    typedef DataType_ DataType; ///< Message data type
    /**
     * @param node Node instance this publisher will be registered with.
     *
     * @param tx_timeout If CAN frames of this message are not delivered to the bus
     * in this amount of time, they will be discarded. Default value
     * is good enough for rather high-frequency, high-priority messages.
     *
     * @param max_transfer_interval Maximum expected transfer interval. It's absolutely safe
     * to leave default value here. It just defines how soon the
     * Transfer ID tracking objects associated with this message type
     * will be garbage collected by the library if it's no longer
     * being published.
     */
    explicit Publisher(INode& node, MonotonicDuration tx_timeout = getDefaultTxTimeout(),
    MonotonicDuration max_transfer_interval = TransferSender::getDefaultMaxTransferInterval())
    : BaseType(node, tx_timeout, max_transfer_interval)
    {
#if UAVCAN_DEBUG
        UAVCAN_ASSERT(getTxTimeout() == tx_timeout); // Making sure default values are OK
#endif
        // Compile-time guard: this wrapper is only for message data types (not services)
        StaticAssert<DataTypeKind(DataType::DataTypeKind) == DataTypeKindMessage>::check();
    }
    /**
     * Broadcast the message.
     * Returns negative error code.
     */
    int broadcast(const DataType& message)
    {
        return BaseType::publish(message, TransferTypeMessageBroadcast, NodeID::Broadcast);
    }
    /**
     * Warning: You probably don't want to use this method; it's for advanced use cases like
     * e.g. network time synchronization. Use the overloaded method with fewer arguments instead.
     * This overload allows one to explicitly specify the Transfer ID.
     * Returns negative error code.
     */
    int broadcast(const DataType& message, TransferID tid)
    {
        return BaseType::publish(message, TransferTypeMessageBroadcast, NodeID::Broadcast, tid);
    }
    static MonotonicDuration getDefaultTxTimeout() { return MonotonicDuration::fromMSec(100); }
    /**
     * Init method can be called prior to first publication, but it's not necessary
     * because the publisher can be automatically initialized ad-hoc.
     */
    using BaseType::init;
    using BaseType::allowAnonymousTransfers;
    using BaseType::getTransferSender;
    using BaseType::getMinTxTimeout;
    using BaseType::getMaxTxTimeout;
    using BaseType::getTxTimeout;
    using BaseType::setTxTimeout;
    using BaseType::getPriority;
    using BaseType::setPriority;
    using BaseType::getNode;
};
}
#endif // UAVCAN_NODE_PUBLISHER_HPP_INCLUDED
@@ -0,0 +1,155 @@
/*
* Copyright (C) 2014 Pavel Kirienko <pavel.kirienko@gmail.com>
*/
#ifndef UAVCAN_NODE_SCHEDULER_HPP_INCLUDED
#define UAVCAN_NODE_SCHEDULER_HPP_INCLUDED
#include <uavcan/error.hpp>
#include <uavcan/util/linked_list.hpp>
#include <uavcan/transport/dispatcher.hpp>
namespace uavcan
{
class UAVCAN_EXPORT Scheduler;
// Base class for objects that need a one-shot deadline callback from the Scheduler.
// Instances are intrusively linked into the DeadlineScheduler's list while running.
class UAVCAN_EXPORT DeadlineHandler : public LinkedListNode<DeadlineHandler>
{
    MonotonicTime deadline_;
protected:
    Scheduler& scheduler_;
    explicit DeadlineHandler(Scheduler& scheduler)
    : scheduler_(scheduler)
    { }
    // Destructor unregisters the handler so the scheduler never calls into a dead object
    virtual ~DeadlineHandler() { stop(); }
public:
    // Invoked by the scheduler once the deadline has expired
    virtual void handleDeadline(MonotonicTime current) = 0;
    void startWithDeadline(MonotonicTime deadline);
    void startWithDelay(MonotonicDuration delay);
    // Schedules the earliest representable deadline so the handler fires on the next poll
    void generateDeadlineImmediately() { startWithDeadline(MonotonicTime::fromUSec(1)); }
    void stop();
    bool isRunning() const;
    MonotonicTime getDeadline() const { return deadline_; }
    Scheduler& getScheduler() const { return scheduler_; }
};
// Maintains the set of active DeadlineHandler objects, ordered by deadline,
// and executes the expired ones when polled by the Scheduler.
class UAVCAN_EXPORT DeadlineScheduler : Noncopyable
{
    LinkedListRoot<DeadlineHandler> handlers_; // Ordered by deadline, lowest first
public:
    void add(DeadlineHandler* mdh);
    void remove(DeadlineHandler* mdh);
    bool doesExist(const DeadlineHandler* mdh) const;
    unsigned getNumHandlers() const { return handlers_.getLength(); }
    // Runs all expired handlers; returns the current monotonic time from sysclock
    MonotonicTime pollAndGetMonotonicTime(ISystemClock& sysclock);
    MonotonicTime getEarliestDeadline() const;
};
/**
 * This class distributes processing time between library components (IO handling, deadline callbacks, ...).
 */
class UAVCAN_EXPORT Scheduler : Noncopyable
{
    // Deadline polling resolution limits, milliseconds
    enum { DefaultDeadlineResolutionMs = 5 };
    enum { MinDeadlineResolutionMs = 1 };
    enum { MaxDeadlineResolutionMs = 100 };
    // Periodic cleanup limits, milliseconds
    enum { DefaultCleanupPeriodMs = 1000 };
    enum { MinCleanupPeriodMs = 10 };
    enum { MaxCleanupPeriodMs = 10000 };
    DeadlineScheduler deadline_scheduler_;
    Dispatcher dispatcher_;
    MonotonicTime prev_cleanup_ts_;
    MonotonicDuration deadline_resolution_;
    MonotonicDuration cleanup_period_;
    bool inside_spin_; // True while a spin()/spinOnce() call is in progress
    // RAII helper: sets inside_spin_ for the duration of the enclosing scope
    struct InsideSpinSetter
    {
        Scheduler& owner;
        InsideSpinSetter(Scheduler& o)
        : owner(o)
        {
            owner.inside_spin_ = true;
        }
        ~InsideSpinSetter() { owner.inside_spin_ = false; }
    };
    MonotonicTime computeDispatcherSpinDeadline(MonotonicTime spin_deadline) const;
    void pollCleanup(MonotonicTime mono_ts, uint32_t num_frames_processed_with_last_spin);
public:
    Scheduler(ICanDriver& can_driver, IPoolAllocator& allocator, ISystemClock& sysclock)
    : dispatcher_(can_driver, allocator, sysclock)
    , prev_cleanup_ts_(sysclock.getMonotonic())
    , deadline_resolution_(MonotonicDuration::fromMSec(DefaultDeadlineResolutionMs))
    , cleanup_period_(MonotonicDuration::fromMSec(DefaultCleanupPeriodMs))
    , inside_spin_(false)
    { }
    /**
     * Spin until the deadline, or until some error occurs.
     * This function will return strictly when the deadline is reached, even if there are unprocessed frames.
     * Returns negative error code.
     */
    int spin(MonotonicTime deadline);
    /**
     * Non-blocking version of @ref spin() - spins until all pending frames and events are processed,
     * or until some error occurs. If there's nothing to do, returns immediately.
     * Returns negative error code.
     */
    int spinOnce();
    DeadlineScheduler& getDeadlineScheduler() { return deadline_scheduler_; }
    Dispatcher& getDispatcher() { return dispatcher_; }
    const Dispatcher& getDispatcher() const { return dispatcher_; }
    ISystemClock& getSystemClock() { return dispatcher_.getSystemClock(); }
    MonotonicTime getMonotonicTime() const { return dispatcher_.getSystemClock().getMonotonic(); }
    UtcTime getUtcTime() const { return dispatcher_.getSystemClock().getUtc(); }
    /**
     * Worst case deadline callback resolution.
     * Higher resolution increases CPU usage.
     */
    MonotonicDuration getDeadlineResolution() const { return deadline_resolution_; }
    void setDeadlineResolution(MonotonicDuration res)
    {
        // Clamp into [MinDeadlineResolutionMs, MaxDeadlineResolutionMs]
        res = min(res, MonotonicDuration::fromMSec(MaxDeadlineResolutionMs));
        res = max(res, MonotonicDuration::fromMSec(MinDeadlineResolutionMs));
        deadline_resolution_ = res;
    }
    /**
     * How often the scheduler will run cleanup (listeners, outgoing transfer registry, ...).
     * Cleanup execution time grows linearly with number of listeners and number of items
     * in the Outgoing Transfer ID registry.
     * Lower period increases CPU usage.
     */
    MonotonicDuration getCleanupPeriod() const { return cleanup_period_; }
    void setCleanupPeriod(MonotonicDuration period)
    {
        // Clamp into [MinCleanupPeriodMs, MaxCleanupPeriodMs]
        period = min(period, MonotonicDuration::fromMSec(MaxCleanupPeriodMs));
        period = max(period, MonotonicDuration::fromMSec(MinCleanupPeriodMs));
        cleanup_period_ = period;
    }
};
}
#endif // UAVCAN_NODE_SCHEDULER_HPP_INCLUDED
@@ -0,0 +1,585 @@
/*
* Copyright (C) 2014 Pavel Kirienko <pavel.kirienko@gmail.com>
*/
#ifndef UAVCAN_NODE_SERVICE_CLIENT_HPP_INCLUDED
#define UAVCAN_NODE_SERVICE_CLIENT_HPP_INCLUDED
#include <uavcan/build_config.hpp>
#include <uavcan/util/multiset.hpp>
#include <uavcan/node/generic_publisher.hpp>
#include <uavcan/node/generic_subscriber.hpp>
#if !defined(UAVCAN_CPP_VERSION) || !defined(UAVCAN_CPP11)
# error UAVCAN_CPP_VERSION
#endif
#if UAVCAN_CPP_VERSION >= UAVCAN_CPP11
# include <functional>
#endif
namespace uavcan
{
/**
 * Identifies one pending service call: the server's node ID plus the transfer ID
 * that was used for the request. Refer to @ref ServiceClient to learn more.
 */
struct ServiceCallID
{
    NodeID server_node_id;
    TransferID transfer_id;

    ServiceCallID() { }

    ServiceCallID(NodeID arg_server_node_id, TransferID arg_transfer_id)
        : server_node_id(arg_server_node_id)
        , transfer_id(arg_transfer_id)
    { }

    // Two call IDs refer to the same call iff both the server node and the transfer ID match
    bool operator==(const ServiceCallID rhs) const
    {
        return (server_node_id == rhs.server_node_id) &&
               (transfer_id == rhs.transfer_id);
    }

    // A meaningful call always addresses a unicast node ID
    bool isValid() const { return server_node_id.isUnicast(); }
};
/**
 * Object of this type will be returned to the application as a result of service call.
 * Note that application ALWAYS gets this result, even when it times out or fails because of some other reason.
 * The class is made noncopyable because it keeps a reference to a stack-allocated object.
 */
template <typename DataType>
class UAVCAN_EXPORT ServiceCallResult : Noncopyable
{
public:
    typedef ReceivedDataStructure<typename DataType::Response> ResponseFieldType;
    enum Status { Success, ErrorTimeout };
private:
    const Status status_; ///< Whether successful or not. Failure to decode the response causes timeout.
    ServiceCallID call_id_; ///< Identifies the call
    ResponseFieldType& response_; ///< Returned data structure. Value undefined if the service call has failed.
public:
    ServiceCallResult(Status arg_status, ServiceCallID arg_call_id, ResponseFieldType& arg_response)
    : status_(arg_status)
    , call_id_(arg_call_id)
    , response_(arg_response)
    {
        UAVCAN_ASSERT(call_id_.isValid());
        UAVCAN_ASSERT((status_ == Success) || (status_ == ErrorTimeout));
    }
    /**
     * Shortcut to quickly check whether the call was successful.
     */
    bool isSuccessful() const { return status_ == Success; }
    Status getStatus() const { return status_; }
    ServiceCallID getCallID() const { return call_id_; }
    /**
     * Returned reference points to a stack-allocated object.
     */
    const ResponseFieldType& getResponse() const { return response_; }
    ResponseFieldType& getResponse() { return response_; }
};
/**
 * This operator neatly prints the service call result prepended with extra data like Server Node ID.
 * The extra data will be represented as YAML comment.
 */
template <typename Stream, typename DataType>
static Stream& operator<<(Stream& s, const ServiceCallResult<DataType>& scr)
{
    // Header line is a YAML comment carrying call metadata
    s << "# Service call result [" << DataType::getDataTypeFullName() << "] "
    << (scr.isSuccessful() ? "OK" : "FAILURE")
    << " server_node_id=" << int(scr.getCallID().server_node_id.get())
    << " tid=" << int(scr.getCallID().transfer_id.get()) << "\n";
    if (scr.isSuccessful())
    {
        s << scr.getResponse();
    }
    else
    {
        // Response content is undefined on failure (see ServiceCallResult), so don't print it
        s << "# (no data)";
    }
    return s;
}
/**
 * Do not use directly.
 */
class ServiceClientBase : protected ITransferAcceptanceFilter
, protected DeadlineHandler
{
    const DataTypeDescriptor* data_type_descriptor_; ///< This will be initialized at the time of first call
protected:
    // One CallState exists per pending call; it owns the per-call timeout deadline.
    class CallState : DeadlineHandler
    {
        ServiceClientBase& owner_;
        const ServiceCallID id_;
        bool timed_out_;
        // Marks the call as timed out when the per-call deadline expires
        virtual void handleDeadline(MonotonicTime) override;
    public:
        CallState(INode& node, ServiceClientBase& owner, ServiceCallID call_id)
        : DeadlineHandler(node.getScheduler())
        , owner_(owner)
        , id_(call_id)
        , timed_out_(false)
        {
            UAVCAN_ASSERT(id_.isValid());
            // Arm the timeout immediately upon construction
            DeadlineHandler::startWithDelay(owner_.request_timeout_);
        }
        ServiceCallID getCallID() const { return id_; }
        bool hasTimedOut() const { return timed_out_; }
        static bool hasTimedOutPredicate(const CallState& cs) { return cs.hasTimedOut(); }
        bool operator==(const CallState& rhs) const
        {
            return (&owner_ == &rhs.owner_) && (id_ == rhs.id_);
        }
    };
    // Matches a live (not timed out) pending call with the given call ID
    struct CallStateMatchingPredicate
    {
        const ServiceCallID id;
        CallStateMatchingPredicate(ServiceCallID reference) : id(reference) { }
        bool operator()(const CallState& state) const { return (state.getCallID() == id) && !state.hasTimedOut(); }
    };
    // Matches any pending call addressed to the given server node ID
    struct ServerSearchPredicate
    {
        const NodeID server_node_id;
        ServerSearchPredicate(NodeID nid) : server_node_id(nid) { }
        bool operator()(const CallState& state) const { return state.getCallID().server_node_id == server_node_id; }
    };
    MonotonicDuration request_timeout_;
    ServiceClientBase(INode& node)
    : DeadlineHandler(node.getScheduler())
    , data_type_descriptor_(UAVCAN_NULLPTR)
    , request_timeout_(getDefaultRequestTimeout())
    { }
    virtual ~ServiceClientBase() { }
    int prepareToCall(INode& node, const char* dtname, NodeID server_node_id, ServiceCallID& out_call_id);
public:
    /**
     * It's not recommended to override default timeouts.
     * Change of this value will not affect pending calls.
     */
    static MonotonicDuration getDefaultRequestTimeout() { return MonotonicDuration::fromMSec(1000); }
    static MonotonicDuration getMinRequestTimeout() { return MonotonicDuration::fromMSec(10); }
    static MonotonicDuration getMaxRequestTimeout() { return MonotonicDuration::fromMSec(60000); }
};
/**
 * Use this class to invoke services on remote nodes.
 *
 * This class can manage multiple concurrent calls to the same or different remote servers. Number of concurrent
 * calls is limited only by amount of available pool memory.
 *
 * Note that the reference passed to the callback points to a stack-allocated object, which means that the
 * reference invalidates once the callback returns. If you want to use this object after the callback execution,
 * you need to copy it somewhere.
 *
 * Note that by default, service client objects use lower priority than message publishers. Use @ref setPriority()
 * to override the default if necessary.
 *
 * @tparam DataType_ Service data type.
 *
 * @tparam Callback_ Service response will be delivered through the callback of this type.
 * In C++11 mode this type defaults to std::function<>.
 * In C++03 mode this type defaults to a plain function pointer; use binder to
 * call member functions as callbacks.
 */
template <typename DataType_,
#if UAVCAN_CPP_VERSION >= UAVCAN_CPP11
typename Callback_ = std::function<void (const ServiceCallResult<DataType_>&)>
#else
typename Callback_ = void (*)(const ServiceCallResult<DataType_>&)
#endif
>
class UAVCAN_EXPORT ServiceClient
: public GenericSubscriber<DataType_,
typename DataType_::Response, TransferListenerWithFilter>
, public ServiceClientBase
{
public:
    typedef DataType_ DataType;
    typedef typename DataType::Request RequestType;
    typedef typename DataType::Response ResponseType;
    typedef ServiceCallResult<DataType> ServiceCallResultType;
    typedef Callback_ Callback;
private:
    typedef ServiceClient<DataType, Callback> SelfType;
    typedef GenericPublisher<DataType, RequestType> PublisherType;
    typedef GenericSubscriber<DataType, ResponseType, TransferListenerWithFilter> SubscriberType;
    typedef Multiset<CallState> CallRegistry;
    // Functor applied to each pending call: delivers the ErrorTimeout result for timed out ones
    struct TimeoutCallbackCaller
    {
        ServiceClient& owner;
        TimeoutCallbackCaller(ServiceClient& arg_owner) : owner(arg_owner) { }
        void operator()(const CallState& state)
        {
            if (state.hasTimedOut())
            {
                UAVCAN_TRACE("ServiceClient::TimeoutCallbackCaller", "Timeout from nid=%d, tid=%d, dtname=%s",
                int(state.getCallID().server_node_id.get()), int(state.getCallID().transfer_id.get()),
                DataType::getDataTypeFullName());
                // No response was received, so an empty (default-initialized) structure is passed
                typename SubscriberType::ReceivedDataStructureSpec rx_struct; // Default-initialized
                ServiceCallResultType result(ServiceCallResultType::ErrorTimeout, state.getCallID(),
                rx_struct); // Mutable!
                owner.invokeCallback(result);
            }
        }
    };
    CallRegistry call_registry_; // One entry per pending call, pool-allocated
    PublisherType publisher_;
    Callback callback_;
    virtual bool shouldAcceptFrame(const RxFrame& frame) const override; // Called from the transfer listener
    void invokeCallback(ServiceCallResultType& result);
    virtual void handleReceivedDataStruct(ReceivedDataStructure<ResponseType>& response) override;
    virtual void handleDeadline(MonotonicTime) override;
    int addCallState(ServiceCallID call_id);
public:
    /**
     * @param node Node instance this client will be registered with.
     * @param callback Callback instance. Optional, can be assigned later.
     */
    explicit ServiceClient(INode& node, const Callback& callback = Callback())
    : SubscriberType(node)
    , ServiceClientBase(node)
    , call_registry_(node.getAllocator())
    , publisher_(node, getDefaultRequestTimeout())
    , callback_(callback)
    {
        // Service requests default to lower priority than message publications (see class docs)
        setPriority(TransferPriority::MiddleLower);
        setRequestTimeout(getDefaultRequestTimeout());
#if UAVCAN_DEBUG
        UAVCAN_ASSERT(getRequestTimeout() == getDefaultRequestTimeout()); // Making sure default values are OK
#endif
    }
    // Pending calls must not outlive the client object
    virtual ~ServiceClient() { cancelAllCalls(); }
    /**
     * Shall be called before first use.
     * Returns negative error code.
     */
    int init()
    {
        return publisher_.init();
    }
    /**
     * Shall be called before first use.
     * This overload allows to set the priority, otherwise it's the same.
     * Returns negative error code.
     */
    int init(TransferPriority priority)
    {
        return publisher_.init(priority);
    }
    /**
     * Performs non-blocking service call.
     * This method transmits the service request and returns immediately.
     *
     * Service response will be delivered into the application via the callback.
     * Note that the callback will ALWAYS be called even if the service call times out; the
     * actual result of the call (success/failure) will be passed to the callback as well.
     *
     * Returns negative error code.
     */
    int call(NodeID server_node_id, const RequestType& request);
    /**
     * Same as plain @ref call() above, but this overload also returns the call ID of the new call.
     * The call ID structure can be used to cancel this request later if needed.
     */
    int call(NodeID server_node_id, const RequestType& request, ServiceCallID& out_call_id);
    /**
     * Cancels certain call referred via call ID structure.
     */
    void cancelCall(ServiceCallID call_id);
    /**
     * Cancels all pending calls.
     */
    void cancelAllCalls();
    /**
     * Checks whether there's currently a pending call addressed to the specified node ID.
     */
    bool hasPendingCallToServer(NodeID server_node_id) const;
    /**
     * This method allows to traverse pending calls. If the index is out of range, an invalid call ID will be returned.
     * Warning: average complexity is O(index); worst case complexity is O(size).
     */
    ServiceCallID getCallIDByIndex(unsigned index) const;
    /**
     * Service response callback must be set prior service call.
     */
    const Callback& getCallback() const { return callback_; }
    void setCallback(const Callback& cb) { callback_ = cb; }
    /**
     * Complexity is O(N) of number of pending calls.
     * Note that the number of pending calls will not be updated until the callback is executed.
     */
    unsigned getNumPendingCalls() const { return call_registry_.getSize(); }
    /**
     * Complexity is O(1).
     * Note that the number of pending calls will not be updated until the callback is executed.
     */
    bool hasPendingCalls() const { return !call_registry_.isEmpty(); }
    /**
     * Returns the number of failed attempts to decode received response. Generally, a failed attempt means either:
     * - Transient failure in the transport layer.
     * - Incompatible data types.
     */
    uint32_t getResponseFailureCount() const { return SubscriberType::getFailureCount(); }
    /**
     * Request timeouts. Note that changing the request timeout will not affect calls that are already pending.
     * There is no such config as TX timeout - TX timeouts are configured automagically according to request timeouts.
     * Not recommended to change.
     */
    MonotonicDuration getRequestTimeout() const { return request_timeout_; }
    void setRequestTimeout(MonotonicDuration timeout)
    {
        // Clamp into [min, max], then keep the request timeout no smaller than the TX timeout
        timeout = max(timeout, getMinRequestTimeout());
        timeout = min(timeout, getMaxRequestTimeout());
        publisher_.setTxTimeout(timeout);
        request_timeout_ = max(timeout, publisher_.getTxTimeout()); // No less than TX timeout
    }
    /**
     * Priority of outgoing request transfers.
     * The remote server is supposed to use the same priority for the response, but it's not guaranteed by
     * the specification.
     */
    TransferPriority getPriority() const { return publisher_.getPriority(); }
    void setPriority(const TransferPriority prio) { publisher_.setPriority(prio); }
};
// ----------------------------------------------------------------------------
// Delivers the result to the application callback. An unset callback is a configuration
// error (it is also checked in call()), so reaching it here is treated as fatal.
template <typename DataType_, typename Callback_>
void ServiceClient<DataType_, Callback_>::invokeCallback(ServiceCallResultType& result)
{
    const bool callback_is_set = coerceOrFallback<bool>(callback_, true);
    if (!callback_is_set)
    {
        handleFatalError("Srv client clbk");
        return;
    }
    callback_(result);
}
// Transfer acceptance filter: a response frame is accepted only if it matches a pending,
// not-yet-timed-out call (same server node ID and transfer ID).
template <typename DataType_, typename Callback_>
bool ServiceClient<DataType_, Callback_>::shouldAcceptFrame(const RxFrame& frame) const
{
    UAVCAN_ASSERT(frame.getTransferType() == TransferTypeServiceResponse); // Other types filtered out by dispatcher
    return UAVCAN_NULLPTR != call_registry_.find(CallStateMatchingPredicate(ServiceCallID(frame.getSrcNodeID(),
    frame.getTransferID())));
}
// Delivers a successfully received service response to the application callback.
template <typename DataType_, typename Callback_>
void ServiceClient<DataType_, Callback_>::handleReceivedDataStruct(ReceivedDataStructure<ResponseType>& response)
{
    UAVCAN_ASSERT(response.getTransferType() == TransferTypeServiceResponse);
    ServiceCallID call_id(response.getSrcNodeID(), response.getTransferID());
    // Remove the pending call state BEFORE invoking the callback, so a recursive
    // call() from inside the callback observes a consistent registry
    cancelCall(call_id);
    ServiceCallResultType result(ServiceCallResultType::Success, call_id, response); // Mutable!
    invokeCallback(result);
}
// Shared deadline handler: fires timeout callbacks, then purges timed out call states.
template <typename DataType_, typename Callback_>
void ServiceClient<DataType_, Callback_>::handleDeadline(MonotonicTime)
{
    UAVCAN_TRACE("ServiceClient", "Shared deadline event received");
    /*
     * Invoking callbacks for timed out call state objects.
     */
    TimeoutCallbackCaller callback_caller(*this);
    call_registry_.template forEach<TimeoutCallbackCaller&>(callback_caller);
    /*
     * Removing timed out objects.
     * This operation cannot be merged with the previous one because that will not work with recursive calls.
     */
    call_registry_.removeAllWhere(&CallState::hasTimedOutPredicate);
    /*
     * Subscriber does not need to be registered if we don't have any pending calls.
     * Removing it makes processing of incoming frames a bit faster.
     */
    if (call_registry_.isEmpty())
    {
        SubscriberType::stop();
    }
}
// Registers a new pending call state; starts the response listener lazily on the first call.
// Returns zero on success, negative error code on failure.
template <typename DataType_, typename Callback_>
int ServiceClient<DataType_, Callback_>::addCallState(ServiceCallID call_id)
{
    // The response listener is only needed while at least one call is pending.
    if (call_registry_.isEmpty())
    {
        const int subscriber_res = SubscriberType::startAsServiceResponseListener();
        if (subscriber_res < 0)
        {
            UAVCAN_TRACE("ServiceClient", "Failed to start the subscriber, error: %i", subscriber_res);
            return subscriber_res;
        }
    }
    if (UAVCAN_NULLPTR == call_registry_.template emplace<INode&, ServiceClientBase&,
                                                          ServiceCallID>(SubscriberType::getNode(), *this, call_id))
    {
        // NOTE(review): on allocation failure the listener is stopped unconditionally, even if
        // other calls are still pending in the registry - verify this is intentional upstream.
        SubscriberType::stop();
        return -ErrMemory;
    }
    return 0;
}
template <typename DataType_, typename Callback_>
int ServiceClient<DataType_, Callback_>::call(NodeID server_node_id, const RequestType& request)
{
    // Delegate to the full overload; this caller is not interested in the assigned call ID.
    ServiceCallID ignored_call_id;
    return call(server_node_id, request, ignored_call_id);
}
// Initiates a service call to the given server node; the assigned call ID is returned via out_call_id.
// Returns a non-negative value on success, negative error code on failure.
template <typename DataType_, typename Callback_>
int ServiceClient<DataType_, Callback_>::call(NodeID server_node_id, const RequestType& request,
                                              ServiceCallID& out_call_id)
{
    // Refuse to start a call that could never deliver its result to the application.
    if (!coerceOrFallback<bool>(callback_, true))
    {
        UAVCAN_TRACE("ServiceClient", "Invalid callback");
        return -ErrInvalidConfiguration;
    }
    /*
     * Common procedures that don't depend on the struct data type
     */
    const int prep_res =
        prepareToCall(SubscriberType::getNode(), DataType::getDataTypeFullName(), server_node_id, out_call_id);
    if (prep_res < 0)
    {
        UAVCAN_TRACE("ServiceClient", "Failed to prepare the call, error: %i", prep_res);
        return prep_res;
    }
    /*
     * Initializing the call state - this will start the subscriber ad-hoc
     */
    const int call_state_res = addCallState(out_call_id);
    if (call_state_res < 0)
    {
        UAVCAN_TRACE("ServiceClient", "Failed to add call state, error: %i", call_state_res);
        return call_state_res;
    }
    /*
     * Configuring the listener so it will accept only the matching responses
     * TODO move to init(), but this requires to somewhat refactor GenericSubscriber<> (remove TransferForwarder)
     */
    TransferListenerWithFilter* const tl = SubscriberType::getTransferListener();
    if (tl == UAVCAN_NULLPTR)
    {
        UAVCAN_ASSERT(0);   // Must have been created
        cancelCall(out_call_id);
        return -ErrLogic;
    }
    tl->installAcceptanceFilter(this);
    /*
     * Publishing the request
     */
    const int publisher_res = publisher_.publish(request, TransferTypeServiceRequest, server_node_id,
                                                 out_call_id.transfer_id);
    if (publisher_res < 0)
    {
        // Transmission failed - roll back the call state created above so no orphan remains.
        cancelCall(out_call_id);
        return publisher_res;
    }
    UAVCAN_ASSERT(server_node_id == out_call_id.server_node_id);
    return publisher_res;
}
template <typename DataType_, typename Callback_>
void ServiceClient<DataType_, Callback_>::cancelCall(ServiceCallID call_id)
{
    // Remove the matching pending call (if any); once nothing is pending the
    // response listener is stopped to speed up processing of incoming frames.
    call_registry_.removeFirstWhere(CallStateMatchingPredicate(call_id));
    const bool nothing_pending = call_registry_.isEmpty();
    if (nothing_pending)
    {
        SubscriberType::stop();
    }
}
// Cancels every pending call and unconditionally stops the response listener.
template <typename DataType_, typename Callback_>
void ServiceClient<DataType_, Callback_>::cancelAllCalls()
{
    call_registry_.clear();
    SubscriberType::stop();
}
template <typename DataType_, typename Callback_>
bool ServiceClient<DataType_, Callback_>::hasPendingCallToServer(NodeID server_node_id) const
{
    // True when at least one pending call targets the given server node.
    const CallState* const pending = call_registry_.find(ServerSearchPredicate(server_node_id));
    return pending != UAVCAN_NULLPTR;
}
template <typename DataType_, typename Callback_>
ServiceCallID ServiceClient<DataType_, Callback_>::getCallIDByIndex(unsigned index) const
{
    // Out-of-range indexes yield a default-constructed ServiceCallID.
    const CallState* const state = call_registry_.getByIndex(index);
    if (state == UAVCAN_NULLPTR)
    {
        return ServiceCallID();
    }
    return state->getCallID();
}
}
#endif // UAVCAN_NODE_SERVICE_CLIENT_HPP_INCLUDED
@@ -0,0 +1,201 @@
/*
* Copyright (C) 2014 Pavel Kirienko <pavel.kirienko@gmail.com>
*/
#ifndef UAVCAN_NODE_SERVICE_SERVER_HPP_INCLUDED
#define UAVCAN_NODE_SERVICE_SERVER_HPP_INCLUDED
#include <uavcan/build_config.hpp>
#include <uavcan/node/generic_publisher.hpp>
#include <uavcan/node/generic_subscriber.hpp>
#if !defined(UAVCAN_CPP_VERSION) || !defined(UAVCAN_CPP11)
# error UAVCAN_CPP_VERSION
#endif
#if UAVCAN_CPP_VERSION >= UAVCAN_CPP11
# include <functional>
#endif
namespace uavcan
{
/**
* This type can be used in place of the response type in a service server callback to get more advanced control
* of service request processing.
*
* PLEASE NOTE that since this class inherits the response type, service server callbacks can accept either
* object of this class or the response type directly if the extra options are not needed.
*
* For example, both of these callbacks can be used with the same service type 'Foo':
*
* void first(const ReceivedDataStructure<Foo::Request>& request,
* ServiceResponseDataStructure<Foo::Response>& response);
*
* void second(const Foo::Request& request,
* Foo::Response& response);
*
* In the latter case, an implicit cast will happen before the callback is invoked.
*/
template <typename ResponseDataType_>
class ServiceResponseDataStructure : public ResponseDataType_
{
    // The underscore-wrapped name minimizes the chance of clashing with fields of the inherited data type
    bool _tx_enabled_;

public:
    typedef ResponseDataType_ ResponseDataType;

    ServiceResponseDataStructure() :
        _tx_enabled_(true)
    { }

    /**
     * When disabled, the server will not transmit the response transfer.
     * By default it is enabled, i.e. response will be sent.
     */
    void setResponseEnabled(bool x) { _tx_enabled_ = x; }

    /**
     * Whether the response will be sent. By default it will.
     */
    bool isResponseEnabled() const { return _tx_enabled_; }
};
/**
* Use this class to implement UAVCAN service servers.
*
* Note that the references passed to the callback may point to stack-allocated objects, which means that the
* references get invalidated once the callback returns.
*
* @tparam DataType_ Service data type.
*
* @tparam Callback_ Service calls will be delivered through the callback of this type, and service
* response will be returned via the output parameter of the callback. Note that
* the reference to service response data struct passed to the callback always points
* to a default initialized response object.
* Please also refer to @ref ReceivedDataStructure<> and @ref ServiceResponseDataStructure<>.
* In C++11 mode this type defaults to std::function<>.
* In C++03 mode this type defaults to a plain function pointer; use binder to
* call member functions as callbacks.
*/
template <typename DataType_,
#if UAVCAN_CPP_VERSION >= UAVCAN_CPP11
          typename Callback_ = std::function<void (const ReceivedDataStructure<typename DataType_::Request>&,
                                                   ServiceResponseDataStructure<typename DataType_::Response>&)>
#else
          typename Callback_ = void (*)(const ReceivedDataStructure<typename DataType_::Request>&,
                                        ServiceResponseDataStructure<typename DataType_::Response>&)
#endif
          >
class UAVCAN_EXPORT ServiceServer
    : public GenericSubscriber<DataType_, typename DataType_::Request, TransferListener>
{
public:
    typedef DataType_ DataType;
    typedef typename DataType::Request RequestType;
    typedef typename DataType::Response ResponseType;
    typedef Callback_ Callback;
private:
    typedef GenericSubscriber<DataType, RequestType, TransferListener> SubscriberType;
    typedef GenericPublisher<DataType, ResponseType> PublisherType;
    // Transmits response transfers back to the requesting node.
    PublisherType publisher_;
    // Application-provided request handler; set by start().
    Callback callback_;
    // Number of response transfers whose publication failed.
    uint32_t response_failure_count_;
    // Invoked by the subscriber machinery for every decoded service request.
    virtual void handleReceivedDataStruct(ReceivedDataStructure<RequestType>& request) override
    {
        UAVCAN_ASSERT(request.getTransferType() == TransferTypeServiceRequest);
        // The response object handed to the callback is always default-initialized.
        ServiceResponseDataStructure<ResponseType> response;
        if (coerceOrFallback<bool>(callback_, true))
        {
            UAVCAN_ASSERT(response.isResponseEnabled());  // Enabled by default
            callback_(request, response);
        }
        else
        {
            handleFatalError("Srv serv clbk");
        }
        // The application may suppress the response via setResponseEnabled(false).
        if (response.isResponseEnabled())
        {
            publisher_.setPriority(request.getPriority());      // Responding at the same priority.
            const int res = publisher_.publish(response, TransferTypeServiceResponse, request.getSrcNodeID(),
                                               request.getTransferID());
            if (res < 0)
            {
                UAVCAN_TRACE("ServiceServer", "Response publication failure: %i", res);
                publisher_.getNode().getDispatcher().getTransferPerfCounter().addError();
                response_failure_count_++;
            }
        }
        else
        {
            UAVCAN_TRACE("ServiceServer", "Response was suppressed by the application");
        }
    }
public:
    explicit ServiceServer(INode& node)
        : SubscriberType(node)
        , publisher_(node, getDefaultTxTimeout())
        , callback_()
        , response_failure_count_(0)
    {
        UAVCAN_ASSERT(getTxTimeout() == getDefaultTxTimeout());  // Making sure it is valid
        StaticAssert<DataTypeKind(DataType::DataTypeKind) == DataTypeKindService>::check();
    }
    /**
     * Starts the server.
     * Incoming service requests will be passed to the application via the callback.
     * Returns negative error code on failure.
     */
    int start(const Callback& callback)
    {
        stop();
        if (!coerceOrFallback<bool>(callback, true))
        {
            UAVCAN_TRACE("ServiceServer", "Invalid callback");
            return -ErrInvalidParam;
        }
        callback_ = callback;
        const int publisher_res = publisher_.init();
        if (publisher_res < 0)
        {
            UAVCAN_TRACE("ServiceServer", "Publisher initialization failure: %i", publisher_res);
            return publisher_res;
        }
        return SubscriberType::startAsServiceRequestListener();
    }
    /**
     * Stops the server.
     */
    using SubscriberType::stop;
    static MonotonicDuration getDefaultTxTimeout() { return MonotonicDuration::fromMSec(1000); }
    static MonotonicDuration getMinTxTimeout() { return PublisherType::getMinTxTimeout(); }
    static MonotonicDuration getMaxTxTimeout() { return PublisherType::getMaxTxTimeout(); }
    MonotonicDuration getTxTimeout() const { return publisher_.getTxTimeout(); }
    void setTxTimeout(MonotonicDuration tx_timeout) { publisher_.setTxTimeout(tx_timeout); }
    /**
     * Returns the number of failed attempts to decode data structs. Generally, a failed attempt means either:
     * - Transient failure in the transport layer.
     * - Incompatible data types.
     */
    uint32_t getRequestFailureCount() const { return SubscriberType::getFailureCount(); }
    /**
     * Returns the number of response transfers whose publication failed.
     */
    uint32_t getResponseFailureCount() const { return response_failure_count_; }
};
}
#endif // UAVCAN_NODE_SERVICE_SERVER_HPP_INCLUDED
@@ -0,0 +1,76 @@
/*
* Copyright (C) 2015 Pavel Kirienko <pavel.kirienko@gmail.com>
*/
#ifndef UAVCAN_SUB_NODE_NODE_HPP_INCLUDED
#define UAVCAN_SUB_NODE_NODE_HPP_INCLUDED
#include <cassert>
#include <uavcan/build_config.hpp>
#include <uavcan/node/abstract_node.hpp>
#if UAVCAN_TINY
# error "This functionality is not available in tiny mode"
#endif
namespace uavcan
{
/**
* This node object can be used in multiprocess UAVCAN nodes.
* Please refer to the @ref Node<> for documentation concerning the template arguments; refer to the tutorials
* to lean how to use libuavcan in multiprocess applications.
*/
template <std::size_t MemPoolSize = 0>
class UAVCAN_EXPORT SubNode : public INode
{
    // Compile-time selection of the allocator type: an embedded pool when MemPoolSize > 0,
    // otherwise a reference to an allocator supplied by the application.
    typedef typename
        Select<(MemPoolSize > 0),
               PoolAllocator<MemPoolSize, MemPoolBlockSize>, // If pool size is specified, use default allocator
               IPoolAllocator&                               // Otherwise use reference to user-provided allocator
              >::Result Allocator;
    Allocator pool_allocator_;
    Scheduler scheduler_;
    // Counts failures reported via registerInternalFailure(); exposed through getInternalFailureCount().
    uint64_t internal_failure_cnt_;
protected:
    // Records an internal failure; msg is only used for tracing.
    virtual void registerInternalFailure(const char* msg)
    {
        internal_failure_cnt_++;
        UAVCAN_TRACE("Node", "Internal failure: %s", msg);
        (void)msg;
    }
public:
    /**
     * This overload is only valid if MemPoolSize > 0.
     */
    SubNode(ICanDriver& can_driver,
            ISystemClock& system_clock) :
        scheduler_(can_driver, pool_allocator_, system_clock),
        internal_failure_cnt_(0)
    { }
    /**
     * This overload is only valid if MemPoolSize == 0.
     */
    SubNode(ICanDriver& can_driver,
            ISystemClock& system_clock,
            IPoolAllocator& allocator) :
        pool_allocator_(allocator),
        scheduler_(can_driver, pool_allocator_, system_clock),
        internal_failure_cnt_(0)
    { }
    virtual typename RemoveReference<Allocator>::Type& getAllocator() { return pool_allocator_; }
    virtual Scheduler& getScheduler() { return scheduler_; }
    virtual const Scheduler& getScheduler() const { return scheduler_; }
    // Number of internal failures registered since construction.
    uint64_t getInternalFailureCount() const { return internal_failure_cnt_; }
};
}
#endif // Include guard
@@ -0,0 +1,104 @@
/*
* Copyright (C) 2014 Pavel Kirienko <pavel.kirienko@gmail.com>
*/
#ifndef UAVCAN_NODE_SUBSCRIBER_HPP_INCLUDED
#define UAVCAN_NODE_SUBSCRIBER_HPP_INCLUDED
#include <cassert>
#include <uavcan/build_config.hpp>
#include <uavcan/node/generic_subscriber.hpp>
#if !defined(UAVCAN_CPP_VERSION) || !defined(UAVCAN_CPP11)
# error UAVCAN_CPP_VERSION
#endif
#if UAVCAN_CPP_VERSION >= UAVCAN_CPP11
# include <functional>
#endif
namespace uavcan
{
/**
* Use this class to subscribe to a message.
*
* @tparam DataType_ Message data type.
*
* @tparam Callback_ Type of the callback that will be used to deliver received messages
* into the application. Type of the argument of the callback can be either:
* - DataType_&
* - const DataType_&
* - ReceivedDataStructure<DataType_>&
* - const ReceivedDataStructure<DataType_>&
* For the first two options, @ref ReceivedDataStructure<> will be casted implicitly.
* In C++11 mode this type defaults to std::function<>.
* In C++03 mode this type defaults to a plain function pointer; use binder to
* call member functions as callbacks.
*/
template <typename DataType_,
#if UAVCAN_CPP_VERSION >= UAVCAN_CPP11
          typename Callback_ = std::function<void (const ReceivedDataStructure<DataType_>&)>
#else
          typename Callback_ = void (*)(const ReceivedDataStructure<DataType_>&)
#endif
          >
class UAVCAN_EXPORT Subscriber
    : public GenericSubscriber<DataType_, DataType_, TransferListener>
{
public:
    typedef Callback_ Callback;
private:
    typedef GenericSubscriber<DataType_, DataType_, TransferListener> BaseType;
    // Application-provided message handler; set by start().
    Callback callback_;
    // Invoked by the subscriber machinery for every decoded message.
    virtual void handleReceivedDataStruct(ReceivedDataStructure<DataType_>& msg) override
    {
        if (coerceOrFallback<bool>(callback_, true))
        {
            callback_(msg);
        }
        else
        {
            // An invalid callback here means start() was bypassed somehow; treat as fatal.
            handleFatalError("Sub clbk");
        }
    }
public:
    typedef DataType_ DataType;
    explicit Subscriber(INode& node)
        : BaseType(node)
        , callback_()
    {
        StaticAssert<DataTypeKind(DataType::DataTypeKind) == DataTypeKindMessage>::check();
    }
    /**
     * Begin receiving messages.
     * Each message will be passed to the application via the callback.
     * Returns negative error code.
     */
    int start(const Callback& callback)
    {
        stop();
        if (!coerceOrFallback<bool>(callback, true))
        {
            UAVCAN_TRACE("Subscriber", "Invalid callback");
            return -ErrInvalidParam;
        }
        callback_ = callback;
        return BaseType::startAsMessageListener();
    }
    using BaseType::allowAnonymousTransfers;
    using BaseType::stop;
    using BaseType::getFailureCount;
};
}
#endif // UAVCAN_NODE_SUBSCRIBER_HPP_INCLUDED
@@ -0,0 +1,148 @@
/*
* Copyright (C) 2014 Pavel Kirienko <pavel.kirienko@gmail.com>
*/
#ifndef UAVCAN_NODE_TIMER_HPP_INCLUDED
#define UAVCAN_NODE_TIMER_HPP_INCLUDED
#include <uavcan/std.hpp>
#include <uavcan/error.hpp>
#include <uavcan/build_config.hpp>
#include <uavcan/util/linked_list.hpp>
#include <uavcan/node/scheduler.hpp>
#include <uavcan/node/abstract_node.hpp>
#include <uavcan/util/templates.hpp>
#if !defined(UAVCAN_CPP11) || !defined(UAVCAN_CPP_VERSION)
# error UAVCAN_CPP_VERSION
#endif
#if UAVCAN_CPP_VERSION >= UAVCAN_CPP11
# include <functional>
#endif
namespace uavcan
{
class UAVCAN_EXPORT TimerBase;
/**
* Objects of this type will be supplied into timer callbacks.
*/
struct UAVCAN_EXPORT TimerEvent
{
    MonotonicTime scheduled_time;   ///< Time when the timer callback was expected to be invoked
    MonotonicTime real_time;        ///< True time when the timer callback was invoked
    // Simple aggregate-style constructor; no validation is performed on the timestamps.
    TimerEvent(MonotonicTime arg_scheduled_time, MonotonicTime arg_real_time)
        : scheduled_time(arg_scheduled_time)
        , real_time(arg_real_time)
    { }
};
/**
* Inherit this class if you need a timer callback method in your class.
*/
class UAVCAN_EXPORT TimerBase : private DeadlineHandler
{
    // Infinite duration indicates one-shot mode or a stopped timer (see getPeriod()).
    MonotonicDuration period_;
    // Defined out of line; dispatches the deadline event into handleTimerEvent().
    virtual void handleDeadline(MonotonicTime current) override;
public:
    using DeadlineHandler::stop;
    using DeadlineHandler::isRunning;
    using DeadlineHandler::getDeadline;
    using DeadlineHandler::getScheduler;
    explicit TimerBase(INode& node)
        : DeadlineHandler(node.getScheduler())
        , period_(MonotonicDuration::getInfinite())
    { }
    /**
     * Various ways to start the timer - periodically or once.
     * If it is running already, it will be restarted.
     * If the deadline is in the past, the event will fire immediately.
     * In periodic mode the timer does not accumulate error over time.
     */
    void startOneShotWithDeadline(MonotonicTime deadline);
    void startOneShotWithDelay(MonotonicDuration delay);
    void startPeriodic(MonotonicDuration period);
    /**
     * Returns period if the timer is in periodic mode.
     * Returns infinite duration if the timer is in one-shot mode or stopped.
     */
    MonotonicDuration getPeriod() const { return period_; }
    /**
     * Implement this method in your class to receive callbacks.
     */
    virtual void handleTimerEvent(const TimerEvent& event) = 0;
};
/**
* Wrapper over TimerBase that forwards callbacks into arbitrary handlers, like
* functor objects, member functions or static functions.
*
* Callback must be set before the first event; otherwise the event will generate a fatal error.
*
* Also take a look at @ref MethodBinder<>, which may come useful if C++11 features are not available.
*
* @tparam Callback_ Callback type. Shall accept const reference to TimerEvent as its argument.
*/
template <typename Callback_>
class UAVCAN_EXPORT TimerEventForwarder : public TimerBase
{
public:
    typedef Callback_ Callback;

private:
    // Application-provided handler; must be set before the first event fires.
    Callback callback_;

    // Forwards the timer event into the user callback.
    // Marked 'override' for consistency with the other overriding virtuals in this library
    // (e.g. TimerBase::handleDeadline) - it overrides TimerBase::handleTimerEvent().
    virtual void handleTimerEvent(const TimerEvent& event) override
    {
        if (coerceOrFallback<bool>(callback_, true))
        {
            callback_(event);
        }
        else
        {
            // The callback was never set (or is invalid) - this is a programming error.
            handleFatalError("Invalid timer callback");
        }
    }

public:
    explicit TimerEventForwarder(INode& node)
        : TimerBase(node)
        , callback_()
    { }

    TimerEventForwarder(INode& node, const Callback& callback)
        : TimerBase(node)
        , callback_(callback)
    { }

    /**
     * Get/set the callback object.
     * Callback must be set before the first event happens; otherwise the event will generate a fatal error.
     */
    const Callback& getCallback() const { return callback_; }
    void setCallback(const Callback& callback) { callback_ = callback; }
};
#if UAVCAN_CPP_VERSION >= UAVCAN_CPP11
/**
* Use this timer in C++11 mode.
* Callback type is std::function<>.
*/
typedef TimerEventForwarder<std::function<void (const TimerEvent& event)> > Timer;
#endif
}
#endif // UAVCAN_NODE_TIMER_HPP_INCLUDED
@@ -0,0 +1,13 @@
High-level protocol logic
=========================
Classes defined in this directory implement some high-level functions of UAVCAN.
Since most applications won't use all of them at the same time, their definitions are provided directly in the header
files rather than in source files, in order not to pollute the build outputs with unused code (avoiding unused code
is also a requirement for most safety-critical software).
However, some of the high-level functions that are either always used by the library itself or those that are extremely
likely to be used by the application are defined in .cpp files.
When adding a new class here, please think twice before putting its definition into a .cpp file.
@@ -0,0 +1,137 @@
/*
* Copyright (C) 2014 Pavel Kirienko <pavel.kirienko@gmail.com>
*/
#ifndef UAVCAN_PROTOCOL_DATA_TYPE_INFO_PROVIDER_HPP_INCLUDED
#define UAVCAN_PROTOCOL_DATA_TYPE_INFO_PROVIDER_HPP_INCLUDED
#include <uavcan/node/service_server.hpp>
#include <uavcan/util/method_binder.hpp>
#include <uavcan/build_config.hpp>
#include <uavcan/protocol/GetDataTypeInfo.hpp>
#include <uavcan/debug.hpp>
namespace uavcan
{
/**
* This class implements the standard services for data type introspection.
* Once started it does not require any attention from the application.
* The user does not need to deal with it directly - it's started by the node class.
*/
class UAVCAN_EXPORT DataTypeInfoProvider : Noncopyable
{
    typedef MethodBinder<DataTypeInfoProvider*,
                         void (DataTypeInfoProvider::*)(const protocol::GetDataTypeInfo::Request&,
                                                        protocol::GetDataTypeInfo::Response&)> GetDataTypeInfoCallback;
    // Server for the standard uavcan.protocol.GetDataTypeInfo service.
    ServiceServer<protocol::GetDataTypeInfo, GetDataTypeInfoCallback> gdti_srv_;
    INode& getNode() { return gdti_srv_.getNode(); }
    static bool isValidDataTypeKind(DataTypeKind kind)
    {
        return (kind == DataTypeKindMessage) || (kind == DataTypeKindService);
    }
    // Service handler: looks the requested type up in the Global Data Type Registry and
    // reports its descriptor plus the local node's relationship to it (serving/publishing/subscribed).
    void handleGetDataTypeInfoRequest(const protocol::GetDataTypeInfo::Request& request,
                                      protocol::GetDataTypeInfo::Response& response)
    {
        /*
         * Asking the Global Data Type Registry for the matching type descriptor, either by name or by ID
         */
        const DataTypeDescriptor* desc = UAVCAN_NULLPTR;
        if (request.name.empty())
        {
            response.id = request.id;       // Pre-setting the fields so they have meaningful values even in
            response.kind = request.kind;   // ...case of failure.
            if (!isValidDataTypeKind(DataTypeKind(request.kind.value)))
            {
                UAVCAN_TRACE("DataTypeInfoProvider", "GetDataTypeInfo request with invalid DataTypeKind %i",
                             static_cast<int>(request.kind.value));
                return;
            }
            desc = GlobalDataTypeRegistry::instance().find(DataTypeKind(request.kind.value), request.id);
        }
        else
        {
            response.name = request.name;
            desc = GlobalDataTypeRegistry::instance().find(request.name.c_str());
        }
        // An unknown type is reported by leaving the FLAG_KNOWN bit unset in the pre-set response.
        if (desc == UAVCAN_NULLPTR)
        {
            UAVCAN_TRACE("DataTypeInfoProvider",
                         "Cannot process GetDataTypeInfo for nonexistent type: dtid=%i dtk=%i name='%s'",
                         static_cast<int>(request.id), static_cast<int>(request.kind.value), request.name.c_str());
            return;
        }
        UAVCAN_TRACE("DataTypeInfoProvider", "GetDataTypeInfo request for %s", desc->toString().c_str());
        /*
         * Filling the response struct
         */
        response.signature = desc->getSignature().get();
        response.id = desc->getID().get();
        response.kind.value = desc->getKind();
        response.flags = protocol::GetDataTypeInfo::Response::FLAG_KNOWN;
        response.name = desc->getFullName();
        const Dispatcher& dispatcher = getNode().getDispatcher();
        if (desc->getKind() == DataTypeKindService)
        {
            if (dispatcher.hasServer(desc->getID().get()))
            {
                response.flags |= protocol::GetDataTypeInfo::Response::FLAG_SERVING;
            }
        }
        else if (desc->getKind() == DataTypeKindMessage)
        {
            if (dispatcher.hasSubscriber(desc->getID().get()))
            {
                response.flags |= protocol::GetDataTypeInfo::Response::FLAG_SUBSCRIBED;
            }
            if (dispatcher.hasPublisher(desc->getID().get()))
            {
                response.flags |= protocol::GetDataTypeInfo::Response::FLAG_PUBLISHING;
            }
        }
        else
        {
            UAVCAN_ASSERT(0);   // That means that GDTR somehow found a type of an unknown kind. The horror.
        }
    }
public:
    explicit DataTypeInfoProvider(INode& node) :
        gdti_srv_(node)
    { }
    // Starts the GetDataTypeInfo service server. Returns negative error code on failure,
    // in which case the server is stopped again before returning.
    int start()
    {
        int res = 0;
        res = gdti_srv_.start(GetDataTypeInfoCallback(this, &DataTypeInfoProvider::handleGetDataTypeInfoRequest));
        if (res < 0)
        {
            goto fail;
        }
        UAVCAN_ASSERT(res >= 0);
        return res;
    fail:
        UAVCAN_ASSERT(res < 0);
        gdti_srv_.stop();
        return res;
    }
};
}
#endif // UAVCAN_PROTOCOL_DATA_TYPE_INFO_PROVIDER_HPP_INCLUDED
@@ -0,0 +1,112 @@
/*
* Copyright (C) 2015 Pavel Kirienko <pavel.kirienko@gmail.com>
*/
#ifndef UAVCAN_PROTOCOL_DYNAMIC_NODE_ID_CLIENT_HPP_INCLUDED
#define UAVCAN_PROTOCOL_DYNAMIC_NODE_ID_CLIENT_HPP_INCLUDED
#include <uavcan/node/subscriber.hpp>
#include <uavcan/node/publisher.hpp>
#include <uavcan/node/timer.hpp>
#include <uavcan/util/method_binder.hpp>
#include <uavcan/build_config.hpp>
#include <uavcan/protocol/dynamic_node_id/Allocation.hpp>
#include <uavcan/protocol/HardwareVersion.hpp>
namespace uavcan
{
/**
* This class implements client-side logic of dynamic node ID allocation procedure.
*
* Once started, the object will be publishing dynamic node ID allocation requests at the default frequency defined
* by the specification, until a Node ID is granted by the allocator.
*
* If the local node is equipped with redundant CAN interfaces, all of them will be used for publishing requests
* and listening for responses.
*
* Once dynamic allocation is complete (or not needed anymore), the object can be deleted.
*
* Note that this class uses std::rand(), which must be correctly seeded before use.
*/
class UAVCAN_EXPORT DynamicNodeIDClient : private TimerBase
{
    typedef MethodBinder<DynamicNodeIDClient*,
                         void (DynamicNodeIDClient::*)
                             (const ReceivedDataStructure<protocol::dynamic_node_id::Allocation>&)>
        AllocationCallback;
    // Internal state machine modes (see restartTimer()).
    enum Mode
    {
        ModeWaitingForTimeSlot,
        ModeDelayBeforeFollowup,
        NumModes
    };
    Publisher<protocol::dynamic_node_id::Allocation> dnida_pub_;
    Subscriber<protocol::dynamic_node_id::Allocation, AllocationCallback> dnida_sub_;
    // Local node's unique ID; assumed to be filled by start() - TODO confirm against the .cpp.
    uint8_t unique_id_[protocol::HardwareVersion::FieldTypes::unique_id::MaxSize];
    uint8_t size_of_received_unique_id_;
    NodeID preferred_node_id_;
    NodeID allocated_node_id_;
    NodeID allocator_node_id_;
    // The following methods are defined out of line in the corresponding .cpp file.
    void terminate();
    static MonotonicDuration getRandomDuration(uint32_t lower_bound_msec, uint32_t upper_bound_msec);
    void restartTimer(const Mode mode);
    virtual void handleTimerEvent(const TimerEvent&) override;
    void handleAllocation(const ReceivedDataStructure<protocol::dynamic_node_id::Allocation>& msg);
public:
    typedef protocol::HardwareVersion::FieldTypes::unique_id UniqueID;
    DynamicNodeIDClient(INode& node)
        : TimerBase(node)
        , dnida_pub_(node)
        , dnida_sub_(node)
        , size_of_received_unique_id_(0)
    { }
    /**
     * @param unique_id         Unique ID of the local node. Must be the same as in the hardware version struct.
     * @param preferred_node_id Node ID that the application would like to take; set to broadcast (zero) if
     *                          the application doesn't have any preference (this is default).
     * @param transfer_priority Transfer priority, Normal by default.
     * @return Zero on success
     *         Negative error code on failure
     *         -ErrLogic if 1. the node is not in passive mode or 2. the client is already started
     */
    int start(const UniqueID& unique_id,
              const NodeID preferred_node_id = NodeID::Broadcast,
              const TransferPriority transfer_priority = TransferPriority::OneHigherThanLowest);
    /**
     * Use this method to determine when allocation is complete.
     */
    bool isAllocationComplete() const { return getAllocatedNodeID().isUnicast(); }
    /**
     * This method allows to retrieve the node ID that was allocated to the local node.
     * If no node ID was allocated yet, the returned node ID will be invalid (non-unicast).
     * @return If allocation is complete, a valid unicast node ID will be returned.
     *         If allocation is not complete yet, a non-unicast node ID will be returned.
     */
    NodeID getAllocatedNodeID() const { return allocated_node_id_; }
    /**
     * This method allows to retrieve node ID of the allocator that granted our Node ID.
     * If no node ID was allocated yet, the returned node ID will be invalid (non-unicast).
     * @return If allocation is complete, a valid unicast node ID will be returned.
     *         If allocation is not complete yet, an non-unicast node ID will be returned.
     */
    NodeID getAllocatorNodeID() const { return allocator_node_id_; }
};
}
#endif // UAVCAN_PROTOCOL_DYNAMIC_NODE_ID_CLIENT_HPP_INCLUDED
@@ -0,0 +1,114 @@
/*
* Copyright (C) 2015 Pavel Kirienko <pavel.kirienko@gmail.com>
*/
#ifndef UAVCAN_PROTOCOL_DYNAMIC_NODE_ID_SERVER_SERVER_HPP_INCLUDED
#define UAVCAN_PROTOCOL_DYNAMIC_NODE_ID_SERVER_SERVER_HPP_INCLUDED
#include <uavcan/build_config.hpp>
#include <uavcan/debug.hpp>
#include <uavcan/protocol/dynamic_node_id_server/allocation_request_manager.hpp>
#include <uavcan/protocol/dynamic_node_id_server/node_discoverer.hpp>
#include <uavcan/protocol/dynamic_node_id_server/event.hpp>
namespace uavcan
{
namespace dynamic_node_id_server
{
// Common base for dynamic node ID allocation servers. Owns the allocation request manager
// and the node discoverer; concrete servers implement the handler interfaces.
class AbstractServer : protected IAllocationRequestHandler
                     , protected INodeDiscoveryHandler
{
    UniqueID own_unique_id_;
    // Timestamp of init() completion; used by guessIfAllDynamicNodesAreAllocated().
    MonotonicTime started_at_;
protected:
    INode& node_;
    IEventTracer& tracer_;
    AllocationRequestManager allocation_request_manager_;
    NodeDiscoverer node_discoverer_;
    AbstractServer(INode& node,
                   IEventTracer& tracer) :
        node_(node),
        tracer_(tracer),
        allocation_request_manager_(node, tracer, *this),
        node_discoverer_(node, tracer, *this)
    { }
    const UniqueID& getOwnUniqueID() const { return own_unique_id_; }
    // Initializes both sub-components; returns the first negative error code encountered.
    int init(const UniqueID& own_unique_id, const TransferPriority priority)
    {
        int res = 0;
        own_unique_id_ = own_unique_id;
        res = allocation_request_manager_.init(priority);
        if (res < 0)
        {
            return res;
        }
        res = node_discoverer_.init(priority);
        if (res < 0)
        {
            return res;
        }
        started_at_ = node_.getMonotonicTime();
        return 0;
    }
public:
    /**
     * This can be used to guess if there are any un-allocated dynamic nodes left in the network.
     */
    bool guessIfAllDynamicNodesAreAllocated(
        const MonotonicDuration& allocation_activity_timeout =
            MonotonicDuration::fromMSec(Allocation::MAX_REQUEST_PERIOD_MS * 2),
        const MonotonicDuration& min_uptime = MonotonicDuration::fromMSec(6000)) const
    {
        const MonotonicTime ts = node_.getMonotonicTime();
        /*
         * If uptime is not large enough, the allocator may be unaware about some nodes yet.
         */
        const MonotonicDuration uptime = ts - started_at_;
        if (uptime < max(allocation_activity_timeout, min_uptime))
        {
            return false;
        }
        /*
         * If there are any undiscovered nodes, assume that allocation is still happening.
         */
        if (node_discoverer_.hasUnknownNodes())
        {
            return false;
        }
        /*
         * Lastly, check if there wasn't any allocation messages detected on the bus in the specified amount of time.
         */
        const MonotonicDuration since_allocation_activity =
            ts - allocation_request_manager_.getTimeOfLastAllocationActivity();
        if (since_allocation_activity < allocation_activity_timeout)
        {
            return false;
        }
        return true;
    }
    /**
     * This is useful for debugging/testing/monitoring.
     */
    const NodeDiscoverer& getNodeDiscoverer() const { return node_discoverer_; }
};
}
}
#endif // Include guard
@@ -0,0 +1,286 @@
/*
* Copyright (C) 2015 Pavel Kirienko <pavel.kirienko@gmail.com>
*/
#ifndef UAVCAN_PROTOCOL_DYNAMIC_NODE_ID_SERVER_ALLOCATION_REQUEST_MANAGER_HPP_INCLUDED
#define UAVCAN_PROTOCOL_DYNAMIC_NODE_ID_SERVER_ALLOCATION_REQUEST_MANAGER_HPP_INCLUDED
#include <uavcan/build_config.hpp>
#include <uavcan/debug.hpp>
#include <uavcan/node/subscriber.hpp>
#include <uavcan/node/publisher.hpp>
#include <uavcan/util/method_binder.hpp>
#include <uavcan/protocol/dynamic_node_id_server/types.hpp>
#include <uavcan/protocol/dynamic_node_id_server/event.hpp>
// UAVCAN types
#include <uavcan/protocol/dynamic_node_id/Allocation.hpp>
namespace uavcan
{
namespace dynamic_node_id_server
{
/**
* The main allocator must implement this interface.
*/
class IAllocationRequestHandler
{
public:
    /**
     * Allocation request manager uses this method to detect if it is allowed to publish follow-up responses.
     */
    virtual bool canPublishFollowupAllocationResponse() const = 0;
    /**
     * This method will be invoked when a new allocation request is received.
     * @param unique_id         Unique ID of the requesting node.
     * @param preferred_node_id Node ID the requester asked for (may be broadcast, i.e. no preference).
     */
    virtual void handleAllocationRequest(const UniqueID& unique_id, NodeID preferred_node_id) = 0;
    // Virtual destructor so implementations can be deleted through this interface.
    virtual ~IAllocationRequestHandler() { }
};
/**
* This class manages communication with allocation clients.
* Three-stage unique ID exchange is implemented here, as well as response publication.
*/
class AllocationRequestManager
{
typedef MethodBinder<AllocationRequestManager*,
void (AllocationRequestManager::*)(const ReceivedDataStructure<Allocation>&)>
AllocationCallback;
const MonotonicDuration stage_timeout_;
MonotonicTime last_message_timestamp_;
MonotonicTime last_activity_timestamp_;
Allocation::FieldTypes::unique_id current_unique_id_;
IAllocationRequestHandler& handler_;
IEventTracer& tracer_;
Subscriber<Allocation, AllocationCallback> allocation_sub_;
Publisher<Allocation> allocation_pub_;
enum { InvalidStage = 0 };
void trace(TraceCode code, int64_t argument) { tracer_.onEvent(code, argument); }
    /**
     * Determines which stage (1..3) of the unique ID exchange the given request message belongs to.
     * Returns InvalidStage if the payload size does not correspond to any valid stage.
     */
    static uint8_t detectRequestStage(const Allocation& msg)
    {
        const uint8_t max_bytes_per_request = Allocation::MAX_LENGTH_OF_UNIQUE_ID_IN_REQUEST;
        if ((msg.unique_id.size() != max_bytes_per_request) &&
            (msg.unique_id.size() != (msg.unique_id.capacity() - max_bytes_per_request * 2U)) &&
            (msg.unique_id.size() != msg.unique_id.capacity()))    // Future proofness for CAN FD
        {
            return InvalidStage;
        }
        if (msg.first_part_of_unique_id)
        {
            return 1;       // Note that CAN FD frames can deliver the unique ID in one stage!
        }
        if (msg.unique_id.size() == Allocation::MAX_LENGTH_OF_UNIQUE_ID_IN_REQUEST)
        {
            return 2;
        }
        if (msg.unique_id.size() < Allocation::MAX_LENGTH_OF_UNIQUE_ID_IN_REQUEST)
        {
            return 3;
        }
        return InvalidStage;
    }
uint8_t getExpectedStage() const
{
if (current_unique_id_.empty())
{
return 1;
}
if (current_unique_id_.size() >= (Allocation::MAX_LENGTH_OF_UNIQUE_ID_IN_REQUEST * 2))
{
return 3;
}
if (current_unique_id_.size() >= Allocation::MAX_LENGTH_OF_UNIQUE_ID_IN_REQUEST)
{
return 2;
}
return InvalidStage;
}
void publishFollowupAllocationResponse()
{
Allocation msg;
msg.unique_id = current_unique_id_;
UAVCAN_ASSERT(msg.unique_id.size() < msg.unique_id.capacity());
UAVCAN_TRACE("AllocationRequestManager", "Intermediate response with %u bytes of unique ID",
unsigned(msg.unique_id.size()));
trace(TraceAllocationFollowupResponse, msg.unique_id.size());
const int res = allocation_pub_.broadcast(msg);
if (res < 0)
{
trace(TraceError, res);
allocation_pub_.getNode().registerInternalFailure("Dynamic allocation broadcast");
}
}
void handleAllocation(const ReceivedDataStructure<Allocation>& msg)
{
trace(TraceAllocationActivity, msg.getSrcNodeID().get());
last_activity_timestamp_ = msg.getMonotonicTimestamp();
if (!msg.isAnonymousTransfer())
{
return; // This is a response from another allocator, ignore
}
/*
* Reset the expected stage on timeout
*/
if (msg.getMonotonicTimestamp() > (last_message_timestamp_ + stage_timeout_))
{
UAVCAN_TRACE("AllocationRequestManager", "Stage timeout, reset");
current_unique_id_.clear();
trace(TraceAllocationFollowupTimeout, (msg.getMonotonicTimestamp() - last_message_timestamp_).toUSec());
}
/*
* Checking if request stage matches the expected stage
*/
const uint8_t request_stage = detectRequestStage(msg);
if (request_stage == InvalidStage)
{
trace(TraceAllocationBadRequest, msg.unique_id.size());
return; // Malformed request - ignore without resetting
}
const uint8_t expected_stage = getExpectedStage();
if (expected_stage == InvalidStage)
{
UAVCAN_ASSERT(0);
return;
}
if (request_stage != expected_stage)
{
trace(TraceAllocationUnexpectedStage, request_stage);
return; // Ignore - stage mismatch
}
const uint8_t max_expected_bytes =
static_cast<uint8_t>(current_unique_id_.capacity() - current_unique_id_.size());
UAVCAN_ASSERT(max_expected_bytes > 0);
if (msg.unique_id.size() > max_expected_bytes)
{
trace(TraceAllocationBadRequest, msg.unique_id.size());
return; // Malformed request
}
/*
* Updating the local state
*/
for (uint8_t i = 0; i < msg.unique_id.size(); i++)
{
current_unique_id_.push_back(msg.unique_id[i]);
}
trace(TraceAllocationRequestAccepted, current_unique_id_.size());
/*
* Proceeding with allocation if possible
* Note that single-frame CAN FD allocation requests will be delivered to the server even if it's not leader.
*/
if (current_unique_id_.size() == current_unique_id_.capacity())
{
UAVCAN_TRACE("AllocationRequestManager", "Allocation request received; preferred node ID: %d",
int(msg.node_id));
UniqueID unique_id;
copy(current_unique_id_.begin(), current_unique_id_.end(), unique_id.begin());
current_unique_id_.clear();
{
uint64_t event_agrument = 0;
for (uint8_t i = 0; i < 8; i++)
{
event_agrument |= static_cast<uint64_t>(unique_id[i]) << (56U - i * 8U);
}
trace(TraceAllocationExchangeComplete, static_cast<int64_t>(event_agrument));
}
handler_.handleAllocationRequest(unique_id, msg.node_id);
}
else
{
if (handler_.canPublishFollowupAllocationResponse())
{
publishFollowupAllocationResponse();
}
else
{
trace(TraceAllocationFollowupDenied, 0);
current_unique_id_.clear();
}
}
/*
* It is super important to update timestamp only if the request has been processed successfully.
*/
last_message_timestamp_ = msg.getMonotonicTimestamp();
}
public:
AllocationRequestManager(INode& node, IEventTracer& tracer, IAllocationRequestHandler& handler)
: stage_timeout_(MonotonicDuration::fromMSec(Allocation::FOLLOWUP_TIMEOUT_MS))
, handler_(handler)
, tracer_(tracer)
, allocation_sub_(node)
, allocation_pub_(node)
{ }
int init(const TransferPriority priority)
{
int res = allocation_pub_.init(priority);
if (res < 0)
{
return res;
}
allocation_pub_.setTxTimeout(MonotonicDuration::fromMSec(Allocation::FOLLOWUP_TIMEOUT_MS));
res = allocation_sub_.start(AllocationCallback(this, &AllocationRequestManager::handleAllocation));
if (res < 0)
{
return res;
}
allocation_sub_.allowAnonymousTransfers();
return 0;
}
int broadcastAllocationResponse(const UniqueID& unique_id, NodeID allocated_node_id)
{
Allocation msg;
msg.unique_id.resize(msg.unique_id.capacity());
copy(unique_id.begin(), unique_id.end(), msg.unique_id.begin());
msg.node_id = allocated_node_id.get();
trace(TraceAllocationResponse, msg.node_id);
last_activity_timestamp_ = allocation_pub_.getNode().getMonotonicTime();
return allocation_pub_.broadcast(msg);
}
/**
* When the last allocation activity was registered.
* This value can be used to heuristically determine whether there are any unallocated nodes left in the network.
*/
MonotonicTime getTimeOfLastAllocationActivity() const { return last_activity_timestamp_; }
};
}
}
#endif // Include guard
@@ -0,0 +1,20 @@
/*
* Copyright (C) 2015 Pavel Kirienko <pavel.kirienko@gmail.com>
*/
#ifndef UAVCAN_PROTOCOL_DYNAMIC_NODE_ID_SERVER_CENTRALIZED_HPP_INCLUDED
#define UAVCAN_PROTOCOL_DYNAMIC_NODE_ID_SERVER_CENTRALIZED_HPP_INCLUDED
#include <uavcan/protocol/dynamic_node_id_server/centralized/server.hpp>
namespace uavcan
{
namespace dynamic_node_id_server
{
/// Convenience alias for the centralized (single-master) dynamic node ID allocation server.
typedef centralized::Server CentralizedServer;
}
}
#endif // UAVCAN_PROTOCOL_DYNAMIC_NODE_ID_SERVER_CENTRALIZED_HPP_INCLUDED
@@ -0,0 +1,180 @@
/*
* Copyright (C) 2015 Pavel Kirienko <pavel.kirienko@gmail.com>
*/
#ifndef UAVCAN_PROTOCOL_DYNAMIC_NODE_ID_SERVER_CENTRALIZED_SERVER_HPP_INCLUDED
#define UAVCAN_PROTOCOL_DYNAMIC_NODE_ID_SERVER_CENTRALIZED_SERVER_HPP_INCLUDED
#include <uavcan/build_config.hpp>
#include <uavcan/debug.hpp>
#include <uavcan/protocol/dynamic_node_id_server/abstract_server.hpp>
#include <uavcan/protocol/dynamic_node_id_server/node_id_selector.hpp>
#include <uavcan/protocol/dynamic_node_id_server/storage_marshaller.hpp>
#include <uavcan/protocol/dynamic_node_id_server/centralized/storage.hpp>
namespace uavcan
{
namespace dynamic_node_id_server
{
namespace centralized
{
/**
 * This server is an alternative to @ref DistributedServer with the following differences:
 * - It is not distributed, so using it means introducing a single point of failure into the system.
 * - It takes less code space and requires less RAM, which makes it suitable for resource-constrained applications.
 *
 * This version is suitable only for simple non-critical systems.
 */
class Server : public AbstractServer
{
    Storage storage_;
    /*
     * Private methods
     */
    // Predicate for NodeIDSelector: true if the candidate node ID already has an allocation entry.
    bool isNodeIDTaken(const NodeID node_id) const
    {
        return storage_.isNodeIDOccupied(node_id);
    }
    // Broadcasts the final allocation response; failures are traced and reported via the node.
    void tryPublishAllocationResult(const NodeID node_id, const UniqueID& unique_id)
    {
        const int res = allocation_request_manager_.broadcastAllocationResponse(unique_id, node_id);
        if (res < 0)
        {
            tracer_.onEvent(TraceError, res);
            node_.registerInternalFailure("Dynamic allocation response");
        }
    }
    /*
     * Methods of IAllocationRequestHandler
     */
    virtual bool canPublishFollowupAllocationResponse() const override
    {
        return true; // Because there's only one Centralized server in the system
    }
    virtual void handleAllocationRequest(const UniqueID& unique_id, const NodeID preferred_node_id) override
    {
        const NodeID existing_node_id = storage_.getNodeIDForUniqueID(unique_id);
        if (existing_node_id.isValid())
        {
            // Idempotent: a node that already has an allocation is answered with the same ID again.
            tryPublishAllocationResult(existing_node_id, unique_id);
        }
        else
        {
            const NodeID allocated_node_id =
                NodeIDSelector<Server>(this, &Server::isNodeIDTaken).findFreeNodeID(preferred_node_id);
            if (allocated_node_id.isUnicast())
            {
                // Persist first; publish the response only if the entry was stored successfully.
                const int res = storage_.add(allocated_node_id, unique_id);
                if (res >= 0)
                {
                    tryPublishAllocationResult(allocated_node_id, unique_id);
                }
                else
                {
                    tracer_.onEvent(TraceError, res);
                    node_.registerInternalFailure("CentralizedServer storage add");
                }
            }
            else
            {
                // Fixed trace tag: it previously said "distributed::Server" (copy-paste error).
                UAVCAN_TRACE("dynamic_node_id_server::centralized::Server", "Request ignored - no free node ID left");
            }
        }
    }
    /*
     * Methods of INodeDiscoveryHandler
     */
    virtual bool canDiscoverNewNodes() const override
    {
        return true; // Because there's only one Centralized server in the system
    }
    virtual NodeAwareness checkNodeAwareness(NodeID node_id) const override
    {
        return storage_.isNodeIDOccupied(node_id) ? NodeAwarenessKnownAndCommitted : NodeAwarenessUnknown;
    }
    virtual void handleNewNodeDiscovery(const UniqueID* unique_id_or_null, NodeID node_id) override
    {
        if (storage_.isNodeIDOccupied(node_id))
        {
            UAVCAN_ASSERT(0); // Such node is already known, the class that called this method should have known that
            return;
        }
        // A node whose unique ID could not be discovered is stored with an all-zero unique ID.
        const int res = storage_.add(node_id, (unique_id_or_null == UAVCAN_NULLPTR) ? UniqueID() : *unique_id_or_null);
        if (res < 0)
        {
            tracer_.onEvent(TraceError, res);
            node_.registerInternalFailure("CentralizedServer storage add");
        }
    }
public:
    /**
     * @param node      Node to run the allocation services on.
     * @param storage   Persistent backend that keeps the unique-ID-to-node-ID mapping.
     * @param tracer    Event sink for diagnostics.
     */
    Server(INode& node,
           IStorageBackend& storage,
           IEventTracer& tracer)
        : AbstractServer(node, tracer)
        , storage_(storage)
    { }
    /**
     * Starts the server. Returns a negative error code on failure.
     * Ensures the server's own node ID is recorded consistently with previous runs.
     */
    int init(const UniqueID& own_unique_id,
             const TransferPriority priority = TransferPriority::OneHigherThanLowest)
    {
        /*
         * Initializing storage first, because the next step requires it to be loaded
         */
        int res = storage_.init();
        if (res < 0)
        {
            return res;
        }
        /*
         * Common logic
         */
        res = AbstractServer::init(own_unique_id, priority);
        if (res < 0)
        {
            return res;
        }
        /*
         * Making sure that the server is started with the same node ID
         */
        {
            const NodeID stored_own_node_id = storage_.getNodeIDForUniqueID(getOwnUniqueID());
            if (stored_own_node_id.isValid())
            {
                // A previous run recorded a different node ID for us - refuse to start.
                if (stored_own_node_id != node_.getNodeID())
                {
                    return -ErrInvalidConfiguration;
                }
            }
            else
            {
                // First run: register our own allocation entry.
                res = storage_.add(node_.getNodeID(), getOwnUniqueID());
                if (res < 0)
                {
                    return res;
                }
            }
        }
        return 0;
    }
    /// Number of node IDs currently allocated (including the server's own).
    uint8_t getNumAllocations() const { return storage_.getSize(); }
};
}
}
}
#endif // UAVCAN_PROTOCOL_DYNAMIC_NODE_ID_SERVER_CENTRALIZED_SERVER_HPP_INCLUDED
@@ -0,0 +1,150 @@
/*
* Copyright (C) 2015 Pavel Kirienko <pavel.kirienko@gmail.com>
*/
#ifndef UAVCAN_PROTOCOL_DYNAMIC_NODE_ID_SERVER_CENTRALIZED_STORAGE_HPP_INCLUDED
#define UAVCAN_PROTOCOL_DYNAMIC_NODE_ID_SERVER_CENTRALIZED_STORAGE_HPP_INCLUDED
#include <uavcan/build_config.hpp>
#include <uavcan/debug.hpp>
#include <uavcan/protocol/dynamic_node_id_server/storage_marshaller.hpp>
#include <uavcan/protocol/dynamic_node_id_server/event.hpp>
#include <uavcan/util/bitset.hpp>
namespace uavcan
{
namespace dynamic_node_id_server
{
namespace centralized
{
/**
 * This class transparently replicates its state to the storage backend, keeping the most recent state in memory.
 * Writes are slow, reads are instantaneous.
 *
 * Layout in the backend:
 *  - one key per unique ID (hex string) mapping to the allocated node ID;
 *  - one "occupation_mask" key holding a packed bitmask of taken node IDs.
 */
class Storage
{
    // One bit per possible node ID; bit set means the ID is allocated.
    typedef BitSet<NodeID::Max + 1> OccupationMask;
    // Byte-packed representation of the mask, used for serialization to the backend.
    typedef Array<IntegerSpec<8, SignednessUnsigned, CastModeSaturate>, ArrayModeStatic,
        BitLenToByteLen<NodeID::Max + 1>::Result>
        OccupationMaskArray;
    IStorageBackend& storage_;
    OccupationMask occupation_mask_;
    static IStorageBackend::String getOccupationMaskKey() { return "occupation_mask"; }
    // Unpacks the serialized byte array into the in-memory bitmask (LSB-first within each byte).
    static OccupationMask maskFromArray(const OccupationMaskArray& array)
    {
        OccupationMask mask;
        for (uint8_t byte = 0; byte < array.size(); byte++)
        {
            for (uint8_t bit = 0; bit < 8; bit++)
            {
                mask[byte * 8U + bit] = (array[byte] & (1U << bit)) != 0;
            }
        }
        return mask;
    }
    // Inverse of maskFromArray(): packs the bitmask into a byte array for storage.
    static OccupationMaskArray maskToArray(const OccupationMask& mask)
    {
        OccupationMaskArray array;
        for (uint8_t byte = 0; byte < array.size(); byte++)
        {
            for (uint8_t bit = 0; bit < 8; bit++)
            {
                if (mask[byte * 8U + bit])
                {
                    array[byte] = static_cast<uint8_t>(array[byte] | (1U << bit));
                }
            }
        }
        return array;
    }
public:
    Storage(IStorageBackend& storage) :
        storage_(storage)
    { }
    /**
     * This method reads only the occupation mask from the storage.
     * A missing or unreadable mask simply yields an empty mask; always returns 0.
     */
    int init()
    {
        StorageMarshaller io(storage_);
        OccupationMaskArray array;
        io.get(getOccupationMaskKey(), array);
        occupation_mask_ = maskFromArray(array);
        return 0;
    }
    /**
     * This method invokes storage IO.
     * Returned value indicates whether the entry was successfully appended.
     * Write order matters: the unique-ID entry is written first, then the mask;
     * the cached mask is updated only after the backend confirmed the write.
     */
    int add(const NodeID node_id, const UniqueID& unique_id)
    {
        if (!node_id.isUnicast())
        {
            return -ErrInvalidParam;
        }
        StorageMarshaller io(storage_);
        // If next operations fail, we'll get a dangling entry, but it's absolutely OK.
        {
            // Round-trip check: read back what was written to detect backend corruption.
            uint32_t node_id_int = node_id.get();
            int res = io.setAndGetBack(StorageMarshaller::convertUniqueIDToHex(unique_id), node_id_int);
            if (res < 0)
            {
                return res;
            }
            if (node_id_int != node_id.get())
            {
                return -ErrFailure;
            }
        }
        // Updating the mask in the storage
        OccupationMask new_occupation_mask = occupation_mask_;
        new_occupation_mask[node_id.get()] = true;
        OccupationMaskArray occupation_array = maskToArray(new_occupation_mask);
        int res = io.setAndGetBack(getOccupationMaskKey(), occupation_array);
        if (res < 0)
        {
            return res;
        }
        if (occupation_array != maskToArray(new_occupation_mask))
        {
            return -ErrFailure;
        }
        // Updating the cached mask only if the storage was updated successfully
        occupation_mask_ = new_occupation_mask;
        return 0;
    }
    /**
     * Returns an invalid node ID if there's no such allocation.
     */
    NodeID getNodeIDForUniqueID(const UniqueID& unique_id) const
    {
        StorageMarshaller io(storage_);
        uint32_t node_id = 0;
        io.get(StorageMarshaller::convertUniqueIDToHex(unique_id), node_id);
        // Out-of-range or missing values map to the invalid (default) NodeID.
        return (node_id > 0 && node_id <= NodeID::Max) ? NodeID(static_cast<uint8_t>(node_id)) : NodeID();
    }
    /// Instant lookup against the cached mask; no storage IO.
    bool isNodeIDOccupied(NodeID node_id) const { return occupation_mask_[node_id.get()]; }
    /// Number of allocated node IDs (population count of the cached mask).
    uint8_t getSize() const { return static_cast<uint8_t>(occupation_mask_.count()); }
};
}
}
}
#endif // Include guard
@@ -0,0 +1,20 @@
/*
* Copyright (C) 2015 Pavel Kirienko <pavel.kirienko@gmail.com>
*/
#ifndef UAVCAN_PROTOCOL_DYNAMIC_NODE_ID_SERVER_DISTRIBUTED_HPP_INCLUDED
#define UAVCAN_PROTOCOL_DYNAMIC_NODE_ID_SERVER_DISTRIBUTED_HPP_INCLUDED
#include <uavcan/protocol/dynamic_node_id_server/distributed/server.hpp>
namespace uavcan
{
namespace dynamic_node_id_server
{
/// Convenience alias for the Raft-based, fault-tolerant dynamic node ID allocation server.
typedef distributed::Server DistributedServer;
}
}
#endif // UAVCAN_PROTOCOL_DYNAMIC_NODE_ID_SERVER_DISTRIBUTED_HPP_INCLUDED
@@ -0,0 +1,433 @@
/*
* Copyright (C) 2015 Pavel Kirienko <pavel.kirienko@gmail.com>
*/
#ifndef UAVCAN_PROTOCOL_DYNAMIC_NODE_ID_SERVER_DISTRIBUTED_CLUSTER_MANAGER_HPP_INCLUDED
#define UAVCAN_PROTOCOL_DYNAMIC_NODE_ID_SERVER_DISTRIBUTED_CLUSTER_MANAGER_HPP_INCLUDED
#include <uavcan/build_config.hpp>
#include <uavcan/debug.hpp>
#include <uavcan/util/method_binder.hpp>
#include <uavcan/node/timer.hpp>
#include <uavcan/node/subscriber.hpp>
#include <uavcan/node/publisher.hpp>
#include <uavcan/protocol/dynamic_node_id_server/distributed/log.hpp>
#include <uavcan/protocol/dynamic_node_id_server/distributed/types.hpp>
#include <uavcan/protocol/dynamic_node_id_server/storage_marshaller.hpp>
#include <uavcan/protocol/dynamic_node_id_server/event.hpp>
// UAVCAN types
#include <uavcan/protocol/dynamic_node_id/server/Discovery.hpp>
namespace uavcan
{
namespace dynamic_node_id_server
{
namespace distributed
{
/**
 * This class maintains the cluster state.
 * It discovers peer servers via the Discovery message and tracks the per-server
 * Raft replication indices (next_index / match_index).
 */
class ClusterManager : private TimerBase
{
public:
    enum { MaxClusterSize = Discovery::FieldTypes::known_nodes::MaxSize };
private:
    typedef MethodBinder<ClusterManager*,
        void (ClusterManager::*)
        (const ReceivedDataStructure<Discovery>&)>
        DiscoveryCallback;
    // Replication bookkeeping for one remote server (see the Raft paper: nextIndex[], matchIndex[]).
    struct Server
    {
        NodeID node_id;
        Log::Index next_index;
        Log::Index match_index;
        Server()
            : next_index(0)
            , match_index(0)
        { }
        // Re-initializes the indices from the log, as prescribed for a newly elected leader.
        void resetIndices(const Log& log)
        {
            next_index = Log::Index(log.getLastIndex() + 1U);
            match_index = 0;
        }
    };
    IStorageBackend& storage_;
    IEventTracer& tracer_;
    const Log& log_;
    Subscriber<Discovery, DiscoveryCallback> discovery_sub_;
    mutable Publisher<Discovery> discovery_pub_;
    Server servers_[MaxClusterSize - 1]; ///< Minus one because the local server is not listed there.
    uint8_t cluster_size_;
    uint8_t num_known_servers_;
    static IStorageBackend::String getStorageKeyForClusterSize() { return "cluster_size"; }
    INode& getNode() { return discovery_sub_.getNode(); }
    const INode& getNode() const { return discovery_sub_.getNode(); }
    // Const overload delegates to the non-const lookup below.
    const Server* findServer(NodeID node_id) const { return const_cast<ClusterManager*>(this)->findServer(node_id); }
    // Linear search over the known servers; returns UAVCAN_NULLPTR if not found.
    Server* findServer(NodeID node_id)
    {
        for (uint8_t i = 0; i < num_known_servers_; i++)
        {
            UAVCAN_ASSERT(servers_[i].node_id.isUnicast());
            if (servers_[i].node_id == node_id)
            {
                return &servers_[i];
            }
        }
        return UAVCAN_NULLPTR;
    }
    /**
     * Periodic timer callback: broadcasts a Discovery message listing all known servers.
     * The timer stops itself once the whole cluster has been discovered.
     */
    virtual void handleTimerEvent(const TimerEvent&)
    {
        UAVCAN_ASSERT(num_known_servers_ < cluster_size_);
        tracer_.onEvent(TraceRaftDiscoveryBroadcast, num_known_servers_);
        /*
         * Filling the message
         */
        Discovery msg;
        msg.configured_cluster_size = cluster_size_;
        msg.known_nodes.push_back(getNode().getNodeID().get()); // Putting ourselves at index 0
        for (uint8_t i = 0; i < num_known_servers_; i++)
        {
            UAVCAN_ASSERT(servers_[i].node_id.isUnicast());
            msg.known_nodes.push_back(servers_[i].node_id.get());
        }
        UAVCAN_ASSERT(msg.known_nodes.size() == (num_known_servers_ + 1));
        /*
         * Broadcasting
         */
        UAVCAN_TRACE("dynamic_node_id_server::distributed::ClusterManager",
            "Broadcasting Discovery message; known nodes: %d of %d",
            int(msg.known_nodes.size()), int(cluster_size_));
        const int res = discovery_pub_.broadcast(msg);
        if (res < 0)
        {
            // NOTE(review): "broadcst" is a pre-existing typo in this trace string; it is runtime
            // text so it is intentionally left unchanged here.
            UAVCAN_TRACE("dynamic_node_id_server::distributed::ClusterManager", "Discovery broadcst failed: %d", res);
            getNode().registerInternalFailure("Raft discovery broadcast");
        }
        /*
         * Termination condition
         */
        if (isClusterDiscovered())
        {
            UAVCAN_TRACE("dynamic_node_id_server::distributed::ClusterManager",
                "Discovery broadcasting timer stopped");
            stop();
        }
    }
    /**
     * Subscriber callback: learns new servers from a received Discovery message and
     * re-starts the discovery broadcast timer if the sender still needs information.
     */
    void handleDiscovery(const ReceivedDataStructure<Discovery>& msg)
    {
        tracer_.onEvent(TraceRaftDiscoveryReceived, msg.getSrcNodeID().get());
        /*
         * Validating cluster configuration
         * If there's a case of misconfiguration, the message will be ignored.
         */
        if (msg.configured_cluster_size != cluster_size_)
        {
            tracer_.onEvent(TraceRaftBadClusterSizeReceived, msg.configured_cluster_size);
            getNode().registerInternalFailure("Bad Raft cluster size");
            return;
        }
        /*
         * Updating the set of known servers
         */
        for (uint8_t i = 0; i < msg.known_nodes.size(); i++)
        {
            if (isClusterDiscovered())
            {
                break;
            }
            const NodeID node_id(msg.known_nodes[i]);
            if (node_id.isUnicast() && !isKnownServer(node_id))
            {
                addServer(node_id);
            }
        }
        /*
         * Publishing a new Discovery request if the publishing server needs to learn about more servers.
         */
        if (msg.configured_cluster_size > msg.known_nodes.size())
        {
            startDiscoveryPublishingTimerIfNotRunning();
        }
    }
    // Idempotent timer start helper.
    void startDiscoveryPublishingTimerIfNotRunning()
    {
        if (!isRunning())
        {
            startPeriodic(MonotonicDuration::fromMSec(Discovery::BROADCASTING_PERIOD_MS));
        }
    }
public:
    enum { ClusterSizeUnknown = 0 };
    /**
     * @param node Needed to publish and subscribe to Discovery message
     * @param storage Needed to read the cluster size parameter from the storage
     * @param log Needed to initialize nextIndex[] values after elections
     */
    ClusterManager(INode& node, IStorageBackend& storage, const Log& log, IEventTracer& tracer)
        : TimerBase(node)
        , storage_(storage)
        , tracer_(tracer)
        , log_(log)
        , discovery_sub_(node)
        , discovery_pub_(node)
        , cluster_size_(0)
        , num_known_servers_(0)
    { }
    /**
     * If cluster_size is set to ClusterSizeUnknown, the class will try to read this parameter from the
     * storage backend using key 'cluster_size'.
     * Returns negative error code.
     */
    int init(const uint8_t init_cluster_size, const TransferPriority priority)
    {
        /*
         * Figuring out the cluster size
         */
        if (init_cluster_size == ClusterSizeUnknown)
        {
            // Reading from the storage
            StorageMarshaller io(storage_);
            uint32_t value = 0;
            int res = io.get(getStorageKeyForClusterSize(), value);
            if (res < 0)
            {
                UAVCAN_TRACE("dynamic_node_id_server::distributed::ClusterManager",
                    "Cluster size is neither configured nor stored in the storage");
                return res;
            }
            if ((value == 0) || (value > MaxClusterSize))
            {
                UAVCAN_TRACE("dynamic_node_id_server::distributed::ClusterManager", "Cluster size is invalid");
                return -ErrInvalidConfiguration;
            }
            cluster_size_ = static_cast<uint8_t>(value);
        }
        else
        {
            if ((init_cluster_size == 0) || (init_cluster_size > MaxClusterSize))
            {
                return -ErrInvalidParam;
            }
            cluster_size_ = init_cluster_size;
            // Writing the storage
            StorageMarshaller io(storage_);
            uint32_t value = init_cluster_size;
            int res = io.setAndGetBack(getStorageKeyForClusterSize(), value);
            if ((res < 0) || (value != init_cluster_size))
            {
                UAVCAN_TRACE("dynamic_node_id_server::distributed::ClusterManager", "Failed to store cluster size");
                return -ErrFailure;
            }
        }
        tracer_.onEvent(TraceRaftClusterSizeInited, cluster_size_);
        UAVCAN_ASSERT(cluster_size_ > 0);
        UAVCAN_ASSERT(cluster_size_ <= MaxClusterSize);
        /*
         * Initializing pub/sub and timer
         */
        int res = discovery_pub_.init(priority);
        if (res < 0)
        {
            return res;
        }
        res = discovery_sub_.start(DiscoveryCallback(this, &ClusterManager::handleDiscovery));
        if (res < 0)
        {
            return res;
        }
        startDiscoveryPublishingTimerIfNotRunning();
        /*
         * Misc
         */
        resetAllServerIndices();
        return 0;
    }
    /**
     * Adds one server regardless of the discovery logic.
     * Duplicates and non-unicast IDs are rejected (assert in debug builds).
     */
    void addServer(NodeID node_id)
    {
        UAVCAN_ASSERT((num_known_servers_ + 1) < MaxClusterSize);
        if (!isKnownServer(node_id) && node_id.isUnicast())
        {
            tracer_.onEvent(TraceRaftNewServerDiscovered, node_id.get());
            servers_[num_known_servers_].node_id = node_id;
            servers_[num_known_servers_].resetIndices(log_);
            num_known_servers_ = static_cast<uint8_t>(num_known_servers_ + 1U);
        }
        else
        {
            UAVCAN_ASSERT(0);
        }
    }
    /**
     * Whether such server has been discovered.
     * The local server is always considered known.
     */
    bool isKnownServer(NodeID node_id) const
    {
        if (node_id == getNode().getNodeID())
        {
            return true;
        }
        for (uint8_t i = 0; i < num_known_servers_; i++)
        {
            UAVCAN_ASSERT(servers_[i].node_id.isUnicast());
            UAVCAN_ASSERT(servers_[i].node_id != getNode().getNodeID());
            if (servers_[i].node_id == node_id)
            {
                return true;
            }
        }
        return false;
    }
    /**
     * An invalid node ID will be returned if there's no such server.
     * The local server is not listed there.
     */
    NodeID getRemoteServerNodeIDAtIndex(uint8_t index) const
    {
        if (index < num_known_servers_)
        {
            return servers_[index].node_id;
        }
        return NodeID();
    }
    /**
     * See next_index[] in Raft paper.
     * Returns 0 (and asserts in debug builds) if the server is unknown.
     */
    Log::Index getServerNextIndex(NodeID server_node_id) const
    {
        const Server* const s = findServer(server_node_id);
        if (s != UAVCAN_NULLPTR)
        {
            return s->next_index;
        }
        UAVCAN_ASSERT(0);
        return 0;
    }
    // Advances next_index for the given server after successful replication.
    void incrementServerNextIndexBy(NodeID server_node_id, Log::Index increment)
    {
        Server* const s = findServer(server_node_id);
        if (s != UAVCAN_NULLPTR)
        {
            s->next_index = Log::Index(s->next_index + increment);
        }
        else
        {
            UAVCAN_ASSERT(0);
        }
    }
    // Backs off next_index for the given server after a rejected AppendEntries.
    void decrementServerNextIndex(NodeID server_node_id)
    {
        Server* const s = findServer(server_node_id);
        if (s != UAVCAN_NULLPTR)
        {
            s->next_index--;
        }
        else
        {
            UAVCAN_ASSERT(0);
        }
    }
    /**
     * See match_index[] in Raft paper.
     * Returns 0 (and asserts in debug builds) if the server is unknown.
     */
    Log::Index getServerMatchIndex(NodeID server_node_id) const
    {
        const Server* const s = findServer(server_node_id);
        if (s != UAVCAN_NULLPTR)
        {
            return s->match_index;
        }
        UAVCAN_ASSERT(0);
        return 0;
    }
    void setServerMatchIndex(NodeID server_node_id, Log::Index match_index)
    {
        Server* const s = findServer(server_node_id);
        if (s != UAVCAN_NULLPTR)
        {
            s->match_index = match_index;
        }
        else
        {
            UAVCAN_ASSERT(0);
        }
    }
    /**
     * This method must be called when the current server becomes leader.
     */
    void resetAllServerIndices()
    {
        for (uint8_t i = 0; i < num_known_servers_; i++)
        {
            UAVCAN_ASSERT(servers_[i].node_id.isUnicast());
            servers_[i].resetIndices(log_);
        }
    }
    /**
     * Number of known servers can only grow, and it never exceeds the cluster size value.
     * This number does not include the local server.
     */
    uint8_t getNumKnownServers() const { return num_known_servers_; }
    /**
     * Cluster size and quorum size are constant.
     */
    uint8_t getClusterSize() const { return cluster_size_; }
    uint8_t getQuorumSize() const { return static_cast<uint8_t>(cluster_size_ / 2U + 1U); }
    /// True once every remote peer (cluster_size - 1 of them) has been discovered.
    bool isClusterDiscovered() const { return num_known_servers_ == (cluster_size_ - 1); }
};
}
}
}
#endif // Include guard
@@ -0,0 +1,310 @@
/*
* Copyright (C) 2015 Pavel Kirienko <pavel.kirienko@gmail.com>
*/
#ifndef UAVCAN_PROTOCOL_DYNAMIC_NODE_ID_SERVER_DISTRIBUTED_LOG_HPP_INCLUDED
#define UAVCAN_PROTOCOL_DYNAMIC_NODE_ID_SERVER_DISTRIBUTED_LOG_HPP_INCLUDED
#include <uavcan/build_config.hpp>
#include <uavcan/debug.hpp>
#include <uavcan/protocol/dynamic_node_id_server/distributed/types.hpp>
#include <uavcan/protocol/dynamic_node_id_server/storage_marshaller.hpp>
#include <uavcan/protocol/dynamic_node_id_server/event.hpp>
namespace uavcan
{
namespace dynamic_node_id_server
{
namespace distributed
{
/**
 * Raft log.
 * This class transparently replicates its state to the storage backend, keeping the most recent state in memory.
 * Writes are slow, reads are instantaneous.
 *
 * Backend layout: a "log_last_index" key plus, per entry N, the keys
 * "logN_term", "logN_unique_id" and "logN_node_id".
 */
class Log
{
public:
    typedef uint8_t Index;
    enum { Capacity = NodeID::Max + 1 };
private:
    IStorageBackend& storage_;
    IEventTracer& tracer_;
    // In-memory mirror of the persisted entries; reads never touch the backend.
    Entry entries_[Capacity];
    Index last_index_; // Index zero always contains an empty entry
    static IStorageBackend::String getLastIndexKey() { return "log_last_index"; }
    // Builds a per-entry storage key, e.g. "log0_term".
    static IStorageBackend::String makeEntryKey(Index index, const char* postfix)
    {
        IStorageBackend::String str;
        // "log0_foobar"
        str += "log";
        str.appendFormatted("%d", int(index));
        str += "_";
        str += postfix;
        return str;
    }
    /**
     * Loads one entry (term, unique ID, node ID) from the backend.
     * Returns -ErrFailure if any field is missing or out of range.
     */
    int readEntryFromStorage(Index index, Entry& out_entry)
    {
        const StorageMarshaller io(storage_);
        // Term
        if (io.get(makeEntryKey(index, "term"), out_entry.term) < 0)
        {
            return -ErrFailure;
        }
        // Unique ID
        if (io.get(makeEntryKey(index, "unique_id"), out_entry.unique_id) < 0)
        {
            return -ErrFailure;
        }
        // Node ID
        uint32_t node_id = 0;
        if (io.get(makeEntryKey(index, "node_id"), node_id) < 0)
        {
            return -ErrFailure;
        }
        if (node_id > NodeID::Max)
        {
            return -ErrFailure;
        }
        out_entry.node_id = static_cast<uint8_t>(node_id);
        return 0;
    }
    /**
     * Persists one entry, reading each field back to verify the write round-trips intact.
     */
    int writeEntryToStorage(Index index, const Entry& entry)
    {
        Entry temp = entry;
        StorageMarshaller io(storage_);
        // Term
        if (io.setAndGetBack(makeEntryKey(index, "term"), temp.term) < 0)
        {
            return -ErrFailure;
        }
        // Unique ID
        if (io.setAndGetBack(makeEntryKey(index, "unique_id"), temp.unique_id) < 0)
        {
            return -ErrFailure;
        }
        // Node ID
        uint32_t node_id = entry.node_id;
        if (io.setAndGetBack(makeEntryKey(index, "node_id"), node_id) < 0)
        {
            return -ErrFailure;
        }
        temp.node_id = static_cast<uint8_t>(node_id);
        return (temp == entry) ? 0 : -ErrFailure;
    }
    /**
     * First-run initialization: writes the mandatory empty zero entry, then the last-index key.
     */
    int initEmptyLogStorage()
    {
        StorageMarshaller io(storage_);
        /*
         * Writing the zero entry - it must always be default-initialized
         */
        entries_[0] = Entry();
        int res = writeEntryToStorage(0, entries_[0]);
        if (res < 0)
        {
            return res;
        }
        /*
         * Initializing last index
         * Last index must be written AFTER the zero entry, otherwise if the write fails here the storage will be
         * left in an inconsistent state.
         */
        last_index_ = 0;
        uint32_t stored_index = 0;
        res = io.setAndGetBack(getLastIndexKey(), stored_index);
        if (res < 0)
        {
            return res;
        }
        if (stored_index != 0)
        {
            return -ErrFailure;
        }
        return 0;
    }
public:
    Log(IStorageBackend& storage, IEventTracer& tracer)
        : storage_(storage)
        , tracer_(tracer)
        , last_index_(0)
    { }
    /**
     * Restores the log from the backend, or initializes empty storage on first run.
     * Returns a negative error code on failure.
     */
    int init()
    {
        StorageMarshaller io(storage_);
        // Reading max index
        {
            uint32_t value = 0;
            if (io.get(getLastIndexKey(), value) < 0)
            {
                if (storage_.get(getLastIndexKey()).empty())
                {
                    // No last-index key at all: fresh storage, initialize it.
                    UAVCAN_TRACE("dynamic_node_id_server::distributed::Log", "Initializing empty storage");
                    return initEmptyLogStorage();
                }
                else
                {
                    // There's some data in the storage, but it cannot be parsed - reporting an error
                    UAVCAN_TRACE("dynamic_node_id_server::distributed::Log", "Failed to read last index");
                    return -ErrFailure;
                }
            }
            if (value >= Capacity)
            {
                return -ErrFailure;
            }
            last_index_ = Index(value);
        }
        tracer_.onEvent(TraceRaftLogLastIndexRestored, last_index_);
        // Restoring log entries - note that index 0 always exists
        for (Index index = 0; index <= last_index_; index++)
        {
            const int result = readEntryFromStorage(index, entries_[index]);
            if (result < 0)
            {
                UAVCAN_TRACE("dynamic_node_id_server::distributed::Log", "Failed to read entry at index %u: %d",
                    unsigned(index), result);
                return result;
            }
        }
        UAVCAN_TRACE("dynamic_node_id_server::distributed::Log", "Restored %u log entries", unsigned(last_index_));
        return 0;
    }
    /**
     * This method invokes storage IO.
     * Returned value indicates whether the entry was successfully appended.
     * The entry is persisted first and the last-index key second, so a failure can only
     * leave a harmless dangling entry, never a truncated log.
     */
    int append(const Entry& entry)
    {
        if ((last_index_ + 1) >= Capacity)
        {
            return -ErrLogic;
        }
        tracer_.onEvent(TraceRaftLogAppend, last_index_ + 1U);
        // If next operations fail, we'll get a dangling entry, but it's absolutely OK.
        int res = writeEntryToStorage(Index(last_index_ + 1), entry);
        if (res < 0)
        {
            return res;
        }
        // Updating the last index
        StorageMarshaller io(storage_);
        uint32_t new_last_index = last_index_ + 1U;
        res = io.setAndGetBack(getLastIndexKey(), new_last_index);
        if (res < 0)
        {
            return res;
        }
        if (new_last_index != last_index_ + 1U)
        {
            return -ErrFailure;
        }
        // In-memory state is updated only after the backend confirmed both writes.
        entries_[new_last_index] = entry;
        last_index_ = Index(new_last_index);
        UAVCAN_TRACE("dynamic_node_id_server::distributed::Log", "New entry, index %u, node ID %u, term %u",
            unsigned(last_index_), unsigned(entry.node_id), unsigned(entry.term));
        return 0;
    }
    /**
     * This method invokes storage IO.
     * Returned value indicates whether the requested operation has been carried out successfully.
     * Index 0 cannot be removed (the zero entry is mandatory).
     */
    int removeEntriesWhereIndexGreaterOrEqual(Index index)
    {
        UAVCAN_ASSERT(last_index_ < Capacity);
        if (((index) >= Capacity) || (index <= 0))
        {
            return -ErrLogic;
        }
        uint32_t new_last_index = index - 1U;
        tracer_.onEvent(TraceRaftLogRemove, new_last_index);
        if (new_last_index != last_index_)
        {
            // Truncation is just rewriting the last-index key; stale entries remain in storage.
            StorageMarshaller io(storage_);
            int res = io.setAndGetBack(getLastIndexKey(), new_last_index);
            if (res < 0)
            {
                return res;
            }
            if (new_last_index != index - 1U)
            {
                return -ErrFailure;
            }
            UAVCAN_TRACE("dynamic_node_id_server::distributed::Log", "Entries removed, last index %u --> %u",
                unsigned(last_index_), unsigned(new_last_index));
            last_index_ = Index(new_last_index);
        }
        // Removal operation leaves dangling entries in storage, it's OK
        return 0;
    }
    // Convenience wrapper: removes entries strictly above the given index.
    int removeEntriesWhereIndexGreater(Index index)
    {
        return removeEntriesWhereIndexGreaterOrEqual(Index(index + 1U));
    }
    /**
     * Returns nullptr if there's no such index.
     * This method does not use storage IO.
     */
    const Entry* getEntryAtIndex(Index index) const
    {
        UAVCAN_ASSERT(last_index_ < Capacity);
        return (index <= last_index_) ? &entries_[index] : UAVCAN_NULLPTR;
    }
    Index getLastIndex() const { return last_index_; }
    /**
     * Raft log-comparison rule for voting: higher last term wins;
     * equal terms are broken by log length.
     */
    bool isOtherLogUpToDate(Index other_last_index, Term other_last_term) const
    {
        UAVCAN_ASSERT(last_index_ < Capacity);
        // Terms are different - the one with higher term is more up-to-date
        if (other_last_term != entries_[last_index_].term)
        {
            return other_last_term > entries_[last_index_].term;
        }
        // Terms are equal - longer log wins
        return other_last_index >= last_index_;
    }
};
}
}
}
#endif // Include guard
@@ -0,0 +1,237 @@
/*
* Copyright (C) 2015 Pavel Kirienko <pavel.kirienko@gmail.com>
*/
#ifndef UAVCAN_PROTOCOL_DYNAMIC_NODE_ID_SERVER_DISTRIBUTED_PERSISTENT_STATE_HPP_INCLUDED
#define UAVCAN_PROTOCOL_DYNAMIC_NODE_ID_SERVER_DISTRIBUTED_PERSISTENT_STATE_HPP_INCLUDED
#include <uavcan/build_config.hpp>
#include <uavcan/debug.hpp>
#include <uavcan/protocol/dynamic_node_id_server/distributed/types.hpp>
#include <uavcan/protocol/dynamic_node_id_server/distributed/log.hpp>
#include <uavcan/protocol/dynamic_node_id_server/storage_marshaller.hpp>
#include <uavcan/protocol/dynamic_node_id_server/event.hpp>
namespace uavcan
{
namespace dynamic_node_id_server
{
namespace distributed
{
/**
 * This class is a convenient container for persistent state variables defined by Raft.
 * Writes are slow, reads are instantaneous.
 */
class PersistentState
{
    IStorageBackend& storage_;      // Key/value backend holding currentTerm, votedFor and the log
    IEventTracer& tracer_;          // Sink for TraceRaft* restore/update events
    Term current_term_;             // In-memory copy of the persisted currentTerm value
    NodeID voted_for_;              // In-memory copy of the persisted votedFor value
    Log log_;                       // Raft log, persisted through the same storage backend

    // Storage keys of the two scalar persistent values
    static IStorageBackend::String getCurrentTermKey() { return "current_term"; }
    static IStorageBackend::String getVotedForKey() { return "voted_for"; }

public:
    PersistentState(IStorageBackend& storage, IEventTracer& tracer)
        : storage_(storage)
        , tracer_(tracer)
        , current_term_(0)
        , log_(storage, tracer)
    { }

    /**
     * Initialization is performed as follows (every step may fail and return an error):
     *  1. Log is restored or initialized.
     *  2. Current term is restored. If there was no current term stored and the log is empty, it will be initialized
     *     with zero.
     *  3. VotedFor value is restored. If there was no VotedFor value stored, the log is empty, and the current term is
     *     zero, the value will be initialized with zero.
     */
    int init()
    {
        /*
         * Reading log
         */
        int res = log_.init();
        if (res < 0)
        {
            UAVCAN_TRACE("dynamic_node_id_server::distributed::PersistentState", "Log init failed: %d", res);
            return res;
        }

        const Entry* const last_entry = log_.getEntryAtIndex(log_.getLastIndex());
        if (last_entry == UAVCAN_NULLPTR)
        {
            UAVCAN_ASSERT(0);
            return -ErrLogic;
        }

        // "Empty" here means the log contains only the index-0 entry and its term is zero
        const bool log_is_empty = (log_.getLastIndex() == 0) && (last_entry->term == 0);

        StorageMarshaller io(storage_);

        /*
         * Reading currentTerm
         */
        if (storage_.get(getCurrentTermKey()).empty() && log_is_empty)
        {
            // First initialization
            current_term_ = 0;
            res = io.setAndGetBack(getCurrentTermKey(), current_term_);
            if (res < 0)
            {
                UAVCAN_TRACE("dynamic_node_id_server::distributed::PersistentState",
                             "Failed to init current term: %d", res);
                return res;
            }
            if (current_term_ != 0)
            {
                // Read-back mismatch means the storage backend is misbehaving
                return -ErrFailure;
            }
        }
        else
        {
            // Restoring
            res = io.get(getCurrentTermKey(), current_term_);
            if (res < 0)
            {
                UAVCAN_TRACE("dynamic_node_id_server::distributed::PersistentState",
                             "Failed to read current term: %d", res);
                return res;
            }
        }

        tracer_.onEvent(TraceRaftCurrentTermRestored, current_term_);

        // Sanity check: a log entry can never carry a term newer than the current term
        if (current_term_ < last_entry->term)
        {
            UAVCAN_TRACE("dynamic_node_id_server::distributed::PersistentState",
                         "Persistent storage is damaged: current term is less than term of the last log entry (%u < %u)",
                         unsigned(current_term_), unsigned(last_entry->term));
            return -ErrLogic;
        }

        /*
         * Reading votedFor
         */
        if (storage_.get(getVotedForKey()).empty() && log_is_empty && (current_term_ == 0))
        {
            // First initialization
            voted_for_ = NodeID(0);
            uint32_t stored_voted_for = 0;
            res = io.setAndGetBack(getVotedForKey(), stored_voted_for);
            if (res < 0)
            {
                UAVCAN_TRACE("dynamic_node_id_server::distributed::PersistentState",
                             "Failed to init votedFor: %d", res);
                return res;
            }
            if (stored_voted_for != 0)
            {
                return -ErrFailure;
            }
        }
        else
        {
            // Restoring
            uint32_t stored_voted_for = 0;
            res = io.get(getVotedForKey(), stored_voted_for);
            if (res < 0)
            {
                UAVCAN_TRACE("dynamic_node_id_server::distributed::PersistentState",
                             "Failed to read votedFor: %d", res);
                return res;
            }
            if (stored_voted_for > NodeID::Max)
            {
                return -ErrInvalidConfiguration;
            }
            voted_for_ = NodeID(uint8_t(stored_voted_for));
        }

        tracer_.onEvent(TraceRaftVotedForRestored, voted_for_.get());

        return 0;
    }

    Term getCurrentTerm() const { return current_term_; }

    NodeID getVotedFor() const { return voted_for_; }

    // A vote is considered set only when votedFor is a unicast (non-zero, addressable) node ID
    bool isVotedForSet() const { return voted_for_.isUnicast(); }

    Log& getLog() { return log_; }
    const Log& getLog() const { return log_; }

    /**
     * Invokes storage IO.
     * Terms are monotonically non-decreasing; a smaller term is rejected with -ErrInvalidParam.
     */
    int setCurrentTerm(Term term)
    {
        if (term < current_term_)
        {
            UAVCAN_ASSERT(0);
            return -ErrInvalidParam;
        }

        tracer_.onEvent(TraceRaftCurrentTermUpdate, term);

        StorageMarshaller io(storage_);

        Term tmp = term;
        int res = io.setAndGetBack(getCurrentTermKey(), tmp);
        if (res < 0)
        {
            return res;
        }
        if (tmp != term)
        {
            // Read-back must return exactly what was written
            return -ErrFailure;
        }

        // The cached value is updated only after successful persistence
        current_term_ = term;
        return 0;
    }

    /**
     * Invokes storage IO.
     */
    int setVotedFor(NodeID node_id)
    {
        if (!node_id.isValid())
        {
            UAVCAN_ASSERT(0);
            return -ErrInvalidParam;
        }

        tracer_.onEvent(TraceRaftVotedForUpdate, node_id.get());

        StorageMarshaller io(storage_);

        uint32_t tmp = node_id.get();
        int res = io.setAndGetBack(getVotedForKey(), tmp);
        if (res < 0)
        {
            return res;
        }
        if (node_id.get() != tmp)
        {
            // Read-back must return exactly what was written
            return -ErrFailure;
        }

        // The cached value is updated only after successful persistence
        voted_for_ = node_id;
        return 0;
    }

    // NodeID(0) is used as the "no vote" marker (it is not unicast, so isVotedForSet() becomes false)
    int resetVotedFor() { return setVotedFor(NodeID(0)); }
};
}
}
}
#endif // Include guard
// ---- extraction artifact: unified-diff hunk header; marks the start of raft_core.hpp ----
/*
* Copyright (C) 2015 Pavel Kirienko <pavel.kirienko@gmail.com>
*/
#ifndef UAVCAN_PROTOCOL_DYNAMIC_NODE_ID_SERVER_DISTRIBUTED_RAFT_CORE_HPP_INCLUDED
#define UAVCAN_PROTOCOL_DYNAMIC_NODE_ID_SERVER_DISTRIBUTED_RAFT_CORE_HPP_INCLUDED
#include <cstdlib>
#include <uavcan/build_config.hpp>
#include <uavcan/debug.hpp>
#include <uavcan/util/method_binder.hpp>
#include <uavcan/node/timer.hpp>
#include <uavcan/node/service_server.hpp>
#include <uavcan/node/service_client.hpp>
#include <uavcan/protocol/dynamic_node_id_server/distributed/types.hpp>
#include <uavcan/protocol/dynamic_node_id_server/distributed/persistent_state.hpp>
#include <uavcan/protocol/dynamic_node_id_server/distributed/cluster_manager.hpp>
#include <uavcan/protocol/dynamic_node_id_server/event.hpp>
// UAVCAN types
#include <uavcan/protocol/dynamic_node_id/server/AppendEntries.hpp>
#include <uavcan/protocol/dynamic_node_id/server/RequestVote.hpp>
namespace uavcan
{
namespace dynamic_node_id_server
{
namespace distributed
{
/**
 * Allocator has to implement this interface so the RaftCore can inform it when a new entry gets committed to the log.
 */
class IRaftLeaderMonitor
{
public:
    /**
     * This method will be invoked when a new log entry is committed (only if the local server is the current Leader).
     */
    virtual void handleLogCommitOnLeader(const Entry& committed_entry) = 0;

    /**
     * Invoked by the Raft core when the local node becomes a leader or ceases to be one.
     * By default the local node is not leader.
     * It is possible to commit to the log right from this method.
     */
    virtual void handleLocalLeadershipChange(bool local_node_is_leader) = 0;

    // Virtual destructor so implementations can be destroyed through this interface
    virtual ~IRaftLeaderMonitor() { }
};
/**
 * This class implements log replication and voting.
 * It does not implement client-server interaction at all; instead it just exposes a public method for adding
 * allocation entries.
 *
 * Note that this class uses std::rand(), so the RNG must be properly seeded by the application.
 *
 * Activity registration:
 *  - persistent state update error
 *  - switch to candidate (this defines timeout between reelections)
 *  - newer term in response (also switch to follower)
 *  - append entries request with term >= currentTerm
 *  - vote granted
 */
class RaftCore : private TimerBase
{
public:
    // The three server roles defined by Raft
    enum ServerState
    {
        ServerStateFollower,
        ServerStateCandidate,
        ServerStateLeader
    };

private:
    // Method binder typedefs for the four service callbacks
    typedef MethodBinder<RaftCore*, void (RaftCore::*)(const ReceivedDataStructure<AppendEntries::Request>&,
                                                       ServiceResponseDataStructure<AppendEntries::Response>&)>
        AppendEntriesCallback;

    typedef MethodBinder<RaftCore*, void (RaftCore::*)(const ServiceCallResult<AppendEntries>&)>
        AppendEntriesResponseCallback;

    typedef MethodBinder<RaftCore*, void (RaftCore::*)(const ReceivedDataStructure<RequestVote::Request>&,
                                                       ServiceResponseDataStructure<RequestVote::Response>&)>
        RequestVoteCallback;

    typedef MethodBinder<RaftCore*, void (RaftCore::*)(const ServiceCallResult<RequestVote>&)>
        RequestVoteResponseCallback;

    /// Book-keeping for the single in-flight AppendEntries call; consumed by handleAppendEntriesResponse().
    struct PendingAppendEntriesFields
    {
        Log::Index prev_log_index;
        Log::Index num_entries;

        PendingAppendEntriesFields()
            : prev_log_index(0)
            , num_entries(0)
        { }
    };

    /*
     * Constants
     */
    enum { MaxNumFollowers = ClusterManager::MaxClusterSize - 1 };

    IEventTracer& tracer_;
    IRaftLeaderMonitor& leader_monitor_;

    /*
     * States
     */
    PersistentState persistent_state_;
    ClusterManager cluster_;
    Log::Index commit_index_;

    MonotonicTime last_activity_timestamp_;
    MonotonicDuration randomized_activity_timeout_;
    ServerState server_state_;

    uint8_t next_server_index_;         ///< Next server to query AE from
    uint8_t num_votes_received_in_this_campaign_;

    PendingAppendEntriesFields pending_append_entries_fields_;

    /*
     * Transport
     */
    ServiceServer<AppendEntries, AppendEntriesCallback> append_entries_srv_;
    ServiceClient<AppendEntries, AppendEntriesResponseCallback> append_entries_client_;
    ServiceServer<RequestVote, RequestVoteCallback> request_vote_srv_;
    ServiceClient<RequestVote, RequestVoteResponseCallback> request_vote_client_;

    /*
     * Methods
     */
    void trace(TraceCode event, int64_t argument) { tracer_.onEvent(event, argument); }

    // The node reference is borrowed from one of the transport objects
    INode& getNode() { return append_entries_srv_.getNode(); }
    const INode& getNode() const { return append_entries_srv_.getNode(); }

    /// Debug-build sanity checks; invoked at the top of every callback and timer tick.
    void checkInvariants() const
    {
        // Commit index
        UAVCAN_ASSERT(commit_index_ <= persistent_state_.getLog().getLastIndex());

        // Term
        UAVCAN_ASSERT(persistent_state_.getLog().getEntryAtIndex(persistent_state_.getLog().getLastIndex()) !=
                      UAVCAN_NULLPTR);
        UAVCAN_ASSERT(persistent_state_.getLog().getEntryAtIndex(persistent_state_.getLog().getLastIndex())->term <=
                      persistent_state_.getCurrentTerm());

        // Elections
        UAVCAN_ASSERT(server_state_ != ServerStateCandidate || !request_vote_client_.hasPendingCalls() ||
                      persistent_state_.getVotedFor() == getNode().getNodeID());
        UAVCAN_ASSERT(num_votes_received_in_this_campaign_ <= cluster_.getClusterSize());

        // Transport
        UAVCAN_ASSERT(append_entries_client_.getNumPendingCalls() <= 1);
        UAVCAN_ASSERT(request_vote_client_.getNumPendingCalls() <= cluster_.getNumKnownServers());
        UAVCAN_ASSERT(server_state_ != ServerStateCandidate || !append_entries_client_.hasPendingCalls());
        UAVCAN_ASSERT(server_state_ != ServerStateLeader || !request_vote_client_.hasPendingCalls());
        UAVCAN_ASSERT(server_state_ != ServerStateFollower ||
                      (!append_entries_client_.hasPendingCalls() && !request_vote_client_.hasPendingCalls()));
    }

    /// Records the current time as "last activity" and picks a fresh randomized election timeout
    /// in (DEFAULT_MIN_ELECTION_TIMEOUT_MS, DEFAULT_MAX_ELECTION_TIMEOUT_MS].
    void registerActivity()
    {
        last_activity_timestamp_ = getNode().getMonotonicTime();

        static const int32_t randomization_range_msec = AppendEntries::Request::DEFAULT_MAX_ELECTION_TIMEOUT_MS -
                                                        AppendEntries::Request::DEFAULT_MIN_ELECTION_TIMEOUT_MS;
        // coverity[dont_call]
        const int32_t random_msec = (std::rand() % randomization_range_msec) + 1;

        randomized_activity_timeout_ =
            MonotonicDuration::fromMSec(AppendEntries::Request::DEFAULT_MIN_ELECTION_TIMEOUT_MS + random_msec);

        UAVCAN_ASSERT(randomized_activity_timeout_.toMSec() > AppendEntries::Request::DEFAULT_MIN_ELECTION_TIMEOUT_MS);
        UAVCAN_ASSERT(randomized_activity_timeout_.toMSec() <= AppendEntries::Request::DEFAULT_MAX_ELECTION_TIMEOUT_MS);
    }

    /// True when no qualifying activity has been registered within the randomized timeout.
    bool isActivityTimedOut() const
    {
        return getNode().getMonotonicTime() > (last_activity_timestamp_ + randomized_activity_timeout_);
    }

    /// Storage failure handling: trace the error, drop to Follower and defer the next election.
    void handlePersistentStateUpdateError(int error)
    {
        UAVCAN_ASSERT(error < 0);
        trace(TraceRaftPersistStateUpdateError, error);
        switchState(ServerStateFollower);
        registerActivity();             // Deferring reelections
    }

    /// Periodic update while Follower: start a campaign if the leader went quiet.
    void updateFollower()
    {
        if (isActivityTimedOut())
        {
            switchState(ServerStateCandidate);
            registerActivity();
        }
    }

    /// Periodic update while Candidate: conclude the election if any votes were counted,
    /// otherwise persist votedFor/currentTerm and request votes from all known servers.
    void updateCandidate()
    {
        if (num_votes_received_in_this_campaign_ > 0)
        {
            trace(TraceRaftElectionComplete, num_votes_received_in_this_campaign_);
            const bool won = num_votes_received_in_this_campaign_ >= cluster_.getQuorumSize();

            UAVCAN_TRACE("dynamic_node_id_server::distributed::RaftCore", "Election complete, won: %d", int(won));

            switchState(won ? ServerStateLeader : ServerStateFollower);     // Start over or become leader
        }
        else
        {
            // Set votedFor, abort on failure
            int res = persistent_state_.setVotedFor(getNode().getNodeID());
            if (res < 0)
            {
                handlePersistentStateUpdateError(res);
                return;
            }

            // Increment current term, abort on failure
            res = persistent_state_.setCurrentTerm(persistent_state_.getCurrentTerm() + 1U);
            if (res < 0)
            {
                handlePersistentStateUpdateError(res);
                return;
            }

            num_votes_received_in_this_campaign_ = 1;       // Voting for self

            RequestVote::Request req;
            req.last_log_index = persistent_state_.getLog().getLastIndex();
            req.last_log_term = persistent_state_.getLog().getEntryAtIndex(req.last_log_index)->term;
            req.term = persistent_state_.getCurrentTerm();

            for (uint8_t i = 0; i < MaxNumFollowers; i++)
            {
                const NodeID node_id = cluster_.getRemoteServerNodeIDAtIndex(i);
                if (!node_id.isUnicast())
                {
                    // No more known servers at this index
                    break;
                }

                UAVCAN_TRACE("dynamic_node_id_server::distributed::RaftCore",
                             "Requesting vote from %d", int(node_id.get()));
                trace(TraceRaftVoteRequestInitiation, node_id.get());

                res = request_vote_client_.call(node_id, req);
                if (res < 0)
                {
                    trace(TraceError, res);
                }
            }
        }
    }

    /// Periodic update while Leader: send AppendEntries to one follower per tick (round-robin),
    /// then try to advance the commit index.
    void updateLeader()
    {
        if (append_entries_client_.hasPendingCalls())
        {
            append_entries_client_.cancelAllCalls();    // Refer to the response callback to learn why
        }

        if (cluster_.getClusterSize() > 1)
        {
            const NodeID node_id = cluster_.getRemoteServerNodeIDAtIndex(next_server_index_);
            UAVCAN_ASSERT(node_id.isUnicast());

            // Round-robin over the known servers
            next_server_index_++;
            if (next_server_index_ >= cluster_.getNumKnownServers())
            {
                next_server_index_ = 0;
            }

            AppendEntries::Request req;
            req.term = persistent_state_.getCurrentTerm();
            req.leader_commit = commit_index_;

            req.prev_log_index = Log::Index(cluster_.getServerNextIndex(node_id) - 1U);

            const Entry* const entry = persistent_state_.getLog().getEntryAtIndex(req.prev_log_index);
            if (entry == UAVCAN_NULLPTR)
            {
                UAVCAN_ASSERT(0);
                handlePersistentStateUpdateError(-ErrLogic);
                return;
            }

            req.prev_log_term = entry->term;

            // Ship as many entries as fit into one request, starting from the follower's nextIndex
            for (Log::Index index = cluster_.getServerNextIndex(node_id);
                 index <= persistent_state_.getLog().getLastIndex();
                 index++)
            {
                req.entries.push_back(*persistent_state_.getLog().getEntryAtIndex(index));
                if (req.entries.size() == req.entries.capacity())
                {
                    break;
                }
            }

            // Remember what was sent so the response handler can update nextIndex/matchIndex
            pending_append_entries_fields_.num_entries = req.entries.size();
            pending_append_entries_fields_.prev_log_index = req.prev_log_index;

            const int res = append_entries_client_.call(node_id, req);
            if (res < 0)
            {
                trace(TraceRaftAppendEntriesCallFailure, res);
            }
        }

        propagateCommitIndex();
    }

    /// Transitions between Follower/Candidate/Leader; no-op when the state is unchanged.
    /// Cancels all outstanding service calls and resets per-campaign/per-term bookkeeping.
    void switchState(ServerState new_state)
    {
        if (server_state_ == new_state)
        {
            return;
        }

        /*
         * Logging
         */
        UAVCAN_TRACE("dynamic_node_id_server::distributed::RaftCore", "State switch: %d --> %d",
                     int(server_state_), int(new_state));
        trace(TraceRaftStateSwitch, new_state);

        /*
         * Updating the current state
         */
        const ServerState old_state = server_state_;
        server_state_ = new_state;

        /*
         * Resetting specific states
         */
        cluster_.resetAllServerIndices();
        next_server_index_ = 0;
        num_votes_received_in_this_campaign_ = 0;
        request_vote_client_.cancelAllCalls();
        append_entries_client_.cancelAllCalls();

        /*
         * Calling the switch handler
         * Note that the handler may commit to the log directly
         */
        if ((old_state == ServerStateLeader) ||
            (new_state == ServerStateLeader))
        {
            leader_monitor_.handleLocalLeadershipChange(new_state == ServerStateLeader);
        }
    }

    /// A response carried a newer term: adopt it (best effort) and fall back to Follower.
    void tryIncrementCurrentTermFromResponse(Term new_term)
    {
        trace(TraceRaftNewerTermInResponse, new_term);
        const int res = persistent_state_.setCurrentTerm(new_term);
        if (res < 0)
        {
            trace(TraceRaftPersistStateUpdateError, res);
        }
        registerActivity();             // Deferring future elections
        switchState(ServerStateFollower);
    }

    /// Leader-only: advances the commit index by one when a quorum of servers has replicated
    /// the entry following the current commit index, and notifies the leader monitor.
    void propagateCommitIndex()
    {
        // Objective is to estimate whether we can safely increment commit index value
        UAVCAN_ASSERT(server_state_ == ServerStateLeader);
        UAVCAN_ASSERT(commit_index_ <= persistent_state_.getLog().getLastIndex());

        if (commit_index_ < persistent_state_.getLog().getLastIndex())
        {
            /*
             * Not all local entries are committed.
             * Deciding if it is safe to increment commit index.
             */
            uint8_t num_nodes_with_next_log_entry_available = 1; // Local node
            for (uint8_t i = 0; i < cluster_.getNumKnownServers(); i++)
            {
                const Log::Index match_index = cluster_.getServerMatchIndex(cluster_.getRemoteServerNodeIDAtIndex(i));
                if (match_index > commit_index_)
                {
                    num_nodes_with_next_log_entry_available++;
                }
            }

            if (num_nodes_with_next_log_entry_available >= cluster_.getQuorumSize())
            {
                commit_index_++;
                UAVCAN_ASSERT(commit_index_ > 0);   // Index 0 is always committed

                trace(TraceRaftNewEntryCommitted, commit_index_);

                // AT THIS POINT ALLOCATION IS COMPLETE
                leader_monitor_.handleLogCommitOnLeader(*persistent_state_.getLog().getEntryAtIndex(commit_index_));
            }
        }
    }

    /// Service callback: AppendEntries request from the (presumed) leader.
    /// Implements steps 1-5 of the receiver logic from the Raft paper.
    void handleAppendEntriesRequest(const ReceivedDataStructure<AppendEntries::Request>& request,
                                    ServiceResponseDataStructure<AppendEntries::Response>& response)
    {
        checkInvariants();

        if (!cluster_.isKnownServer(request.getSrcNodeID()))
        {
            if (cluster_.isClusterDiscovered())
            {
                // Unknown sender after discovery has finished - drop silently
                trace(TraceRaftRequestIgnored, request.getSrcNodeID().get());
                response.setResponseEnabled(false);
                return;
            }
            else
            {
                // Still discovering the cluster - learn this server
                cluster_.addServer(request.getSrcNodeID());
            }
        }

        UAVCAN_ASSERT(response.isResponseEnabled());  // This is default

        /*
         * Checking if our current state is up to date.
         * The request will be ignored if persistent state cannot be updated.
         */
        if (request.term > persistent_state_.getCurrentTerm())
        {
            int res = persistent_state_.setCurrentTerm(request.term);
            if (res < 0)
            {
                handlePersistentStateUpdateError(res);
                response.setResponseEnabled(false);
                return;
            }

            res = persistent_state_.resetVotedFor();
            if (res < 0)
            {
                handlePersistentStateUpdateError(res);
                response.setResponseEnabled(false);
                return;
            }
        }

        /*
         * Preparing the response
         */
        response.term = persistent_state_.getCurrentTerm();
        response.success = false;

        /*
         * Step 1 (see Raft paper)
         * Reject the request if the leader has stale term number.
         */
        if (request.term < persistent_state_.getCurrentTerm())
        {
            response.setResponseEnabled(true);
            return;
        }

        registerActivity();
        switchState(ServerStateFollower);

        /*
         * Step 2
         * Reject the request if the assumed log index does not exist on the local node.
         */
        const Entry* const prev_entry = persistent_state_.getLog().getEntryAtIndex(request.prev_log_index);
        if (prev_entry == UAVCAN_NULLPTR)
        {
            response.setResponseEnabled(true);
            return;
        }

        /*
         * Step 3
         * Drop log entries if term number does not match.
         * Ignore the request if the persistent state cannot be updated.
         */
        if (prev_entry->term != request.prev_log_term)
        {
            const int res = persistent_state_.getLog().removeEntriesWhereIndexGreaterOrEqual(request.prev_log_index);
            response.setResponseEnabled(res >= 0);
            if (res < 0)
            {
                trace(TraceRaftPersistStateUpdateError, res);
            }
            return;
        }

        /*
         * Step 4
         * Update the log with new entries - this will possibly require to rewrite existing entries.
         * Ignore the request if the persistent state cannot be updated.
         */
        if (request.prev_log_index != persistent_state_.getLog().getLastIndex())
        {
            const int res = persistent_state_.getLog().removeEntriesWhereIndexGreater(request.prev_log_index);
            if (res < 0)
            {
                trace(TraceRaftPersistStateUpdateError, res);
                response.setResponseEnabled(false);
                return;
            }
        }

        for (uint8_t i = 0; i < request.entries.size(); i++)
        {
            const int res = persistent_state_.getLog().append(request.entries[i]);
            if (res < 0)
            {
                trace(TraceRaftPersistStateUpdateError, res);
                response.setResponseEnabled(false);
                return;                 // Response will not be sent, the server will assume that we're dead
            }
        }

        /*
         * Step 5
         * Update the commit index.
         */
        if (request.leader_commit > commit_index_)
        {
            commit_index_ = min(request.leader_commit, persistent_state_.getLog().getLastIndex());
            trace(TraceRaftCommitIndexUpdate, commit_index_);
        }

        response.setResponseEnabled(true);
        response.success = true;
    }

    /// Service-client callback: outcome of the AppendEntries call issued from updateLeader().
    /// Updates the follower's nextIndex/matchIndex bookkeeping accordingly.
    void handleAppendEntriesResponse(const ServiceCallResult<AppendEntries>& result)
    {
        UAVCAN_ASSERT(server_state_ == ServerStateLeader);  // When state switches, all requests must be cancelled
        checkInvariants();

        if (!result.isSuccessful())
        {
            return;
        }

        if (result.getResponse().term > persistent_state_.getCurrentTerm())
        {
            tryIncrementCurrentTermFromResponse(result.getResponse().term);
        }
        else
        {
            if (result.getResponse().success)
            {
                cluster_.incrementServerNextIndexBy(result.getCallID().server_node_id,
                                                    pending_append_entries_fields_.num_entries);
                cluster_.setServerMatchIndex(result.getCallID().server_node_id,
                                             Log::Index(pending_append_entries_fields_.prev_log_index +
                                                        pending_append_entries_fields_.num_entries));
            }
            else
            {
                // Follower rejected - back off its nextIndex and retry on a later tick
                cluster_.decrementServerNextIndex(result.getCallID().server_node_id);
                trace(TraceRaftAppendEntriesRespUnsucfl, result.getCallID().server_node_id.get());
            }
        }

        pending_append_entries_fields_ = PendingAppendEntriesFields();

        // Rest of the logic is implemented in periodic update handlers.
    }

    /// Service callback: RequestVote request from a campaigning candidate.
    void handleRequestVoteRequest(const ReceivedDataStructure<RequestVote::Request>& request,
                                  ServiceResponseDataStructure<RequestVote::Response>& response)
    {
        checkInvariants();
        trace(TraceRaftVoteRequestReceived, request.getSrcNodeID().get());

        if (!cluster_.isKnownServer(request.getSrcNodeID()))
        {
            trace(TraceRaftRequestIgnored, request.getSrcNodeID().get());
            response.setResponseEnabled(false);
            return;
        }

        UAVCAN_ASSERT(response.isResponseEnabled());  // This is default

        /*
         * Checking if our current state is up to date.
         * The request will be ignored if persistent state cannot be updated.
         */
        if (request.term > persistent_state_.getCurrentTerm())
        {
            switchState(ServerStateFollower);       // Our term is stale, so we can't serve as leader

            int res = persistent_state_.setCurrentTerm(request.term);
            if (res < 0)
            {
                handlePersistentStateUpdateError(res);
                response.setResponseEnabled(false);
                return;
            }

            res = persistent_state_.resetVotedFor();
            if (res < 0)
            {
                handlePersistentStateUpdateError(res);
                response.setResponseEnabled(false);
                return;
            }
        }

        /*
         * Preparing the response
         */
        response.term = persistent_state_.getCurrentTerm();

        if (request.term < response.term)
        {
            // Stale candidate term - refuse the vote
            response.vote_granted = false;
        }
        else
        {
            // Grant the vote only if we haven't voted for someone else this term
            // and the candidate's log is at least as up-to-date as ours
            const bool can_vote = !persistent_state_.isVotedForSet() ||
                                  (persistent_state_.getVotedFor() == request.getSrcNodeID());
            const bool log_is_up_to_date =
                persistent_state_.getLog().isOtherLogUpToDate(request.last_log_index, request.last_log_term);

            response.vote_granted = can_vote && log_is_up_to_date;

            if (response.vote_granted)
            {
                switchState(ServerStateFollower);   // Avoiding race condition when Candidate

                registerActivity();                 // This is necessary to avoid excessive elections

                const int res = persistent_state_.setVotedFor(request.getSrcNodeID());
                if (res < 0)
                {
                    trace(TraceRaftPersistStateUpdateError, res);
                    response.setResponseEnabled(false);
                    return;
                }
            }
        }
    }

    /// Service-client callback: outcome of a RequestVote call issued from updateCandidate().
    void handleRequestVoteResponse(const ServiceCallResult<RequestVote>& result)
    {
        UAVCAN_ASSERT(server_state_ == ServerStateCandidate);  // When state switches, all requests must be cancelled
        checkInvariants();

        if (!result.isSuccessful())
        {
            return;
        }

        trace(TraceRaftVoteRequestSucceeded, result.getCallID().server_node_id.get());

        if (result.getResponse().term > persistent_state_.getCurrentTerm())
        {
            tryIncrementCurrentTermFromResponse(result.getResponse().term);
        }
        else
        {
            if (result.getResponse().vote_granted)
            {
                num_votes_received_in_this_campaign_++;
            }
        }

        // Rest of the logic is implemented in periodic update handlers.
        // I'm no fan of asynchronous programming. At all.
    }

    /// Periodic timer tick: dispatch to the update handler of the current state.
    virtual void handleTimerEvent(const TimerEvent&)
    {
        checkInvariants();

        switch (server_state_)
        {
        case ServerStateFollower:
        {
            updateFollower();
            break;
        }
        case ServerStateCandidate:
        {
            updateCandidate();
            break;
        }
        case ServerStateLeader:
        {
            updateLeader();
            break;
        }
        default:
        {
            UAVCAN_ASSERT(0);
            break;
        }
        }
    }

public:
    RaftCore(INode& node,
             IStorageBackend& storage,
             IEventTracer& tracer,
             IRaftLeaderMonitor& leader_monitor)
        : TimerBase(node)
        , tracer_(tracer)
        , leader_monitor_(leader_monitor)
        , persistent_state_(storage, tracer)
        , cluster_(node, storage, persistent_state_.getLog(), tracer)
        , commit_index_(0)              // Per Raft paper, commitIndex must be initialized to zero
        , last_activity_timestamp_(node.getMonotonicTime())
        , randomized_activity_timeout_(
            MonotonicDuration::fromMSec(AppendEntries::Request::DEFAULT_MAX_ELECTION_TIMEOUT_MS))
        , server_state_(ServerStateFollower)
        , next_server_index_(0)
        , num_votes_received_in_this_campaign_(0)
        , append_entries_srv_(node)
        , append_entries_client_(node)
        , request_vote_srv_(node)
        , request_vote_client_(node)
    { }

    /**
     * Once started, the logic runs in the background until destructor is called.
     * @param cluster_size If set, this value will be used and stored in the persistent storage. If not set,
     *                     value from the persistent storage will be used. If not set and there's no such key
     *                     in the persistent storage, initialization will fail.
     * @param priority     Transfer priority used for both service clients.
     * @return Zero on success, negative error code otherwise.
     */
    int init(const uint8_t cluster_size, const TransferPriority priority)
    {
        /*
         * Initializing state variables
         */
        server_state_ = ServerStateFollower;
        next_server_index_ = 0;
        num_votes_received_in_this_campaign_ = 0;
        commit_index_ = 0;

        registerActivity();

        /*
         * Initializing internals
         */
        int res = persistent_state_.init();
        if (res < 0)
        {
            return res;
        }

        res = cluster_.init(cluster_size, priority);
        if (res < 0)
        {
            return res;
        }

        res = append_entries_srv_.start(AppendEntriesCallback(this, &RaftCore::handleAppendEntriesRequest));
        if (res < 0)
        {
            return res;
        }

        res = request_vote_srv_.start(RequestVoteCallback(this, &RaftCore::handleRequestVoteRequest));
        if (res < 0)
        {
            return res;
        }

        res = append_entries_client_.init(priority);
        if (res < 0)
        {
            return res;
        }
        append_entries_client_.setCallback(AppendEntriesResponseCallback(this,
                                                                         &RaftCore::handleAppendEntriesResponse));

        res = request_vote_client_.init(priority);
        if (res < 0)
        {
            return res;
        }
        request_vote_client_.setCallback(RequestVoteResponseCallback(this, &RaftCore::handleRequestVoteResponse));

        /*
         * Initializing timing constants
         * Refer to the specification for the formula
         */
        const uint8_t num_followers = static_cast<uint8_t>(cluster_.getClusterSize() - 1);
        const MonotonicDuration update_interval =
            MonotonicDuration::fromMSec(AppendEntries::Request::DEFAULT_MIN_ELECTION_TIMEOUT_MS /
                                        2 / max(static_cast<uint8_t>(2), num_followers));
        UAVCAN_TRACE("dynamic_node_id_server::distributed::RaftCore",
                     "Update interval: %ld msec", static_cast<long>(update_interval.toMSec()));

        // Service timeouts must not exceed the update interval, since pending calls are cancelled every tick
        append_entries_client_.setRequestTimeout(min(append_entries_client_.getDefaultRequestTimeout(),
                                                     update_interval));
        request_vote_client_.setRequestTimeout(min(request_vote_client_.getDefaultRequestTimeout(),
                                                   update_interval));

        startPeriodic(update_interval);

        trace(TraceRaftCoreInited, update_interval.toUSec());
        UAVCAN_ASSERT(res >= 0);
        return 0;
    }

    /**
     * This function is mostly needed for testing.
     */
    Log::Index getCommitIndex() const { return commit_index_; }

    /**
     * This essentially indicates whether the server could replicate log since last allocation.
     */
    bool areAllLogEntriesCommitted() const { return commit_index_ == persistent_state_.getLog().getLastIndex(); }

    /**
     * Only the leader can call @ref appendLog().
     */
    bool isLeader() const { return server_state_ == ServerStateLeader; }

    /**
     * Inserts one entry into log.
     * This method will trigger an assertion failure and return error if the current node is not the leader.
     * If operation fails, the node may give up its Leader status.
     */
    void appendLog(const Entry::FieldTypes::unique_id& unique_id, NodeID node_id)
    {
        if (isLeader())
        {
            Entry entry;
            entry.node_id = node_id.get();
            entry.unique_id = unique_id;
            entry.term = persistent_state_.getCurrentTerm();

            trace(TraceRaftNewLogEntry, entry.node_id);
            const int res = persistent_state_.getLog().append(entry);
            if (res < 0)
            {
                handlePersistentStateUpdateError(res);
            }
        }
        else
        {
            UAVCAN_ASSERT(0);
        }
    }

    /**
     * This class is used to perform log searches.
     */
    struct LogEntryInfo
    {
        Entry entry;        // Copy of the log entry
        bool committed;     // True if the entry index is <= the current commit index

        LogEntryInfo(const Entry& arg_entry, bool arg_committed)
            : entry(arg_entry)
            , committed(arg_committed)
        { }
    };

    /**
     * This method is used by the allocator to query existence of certain entries in the Raft log.
     * Predicate is a callable of the following prototype:
     *  bool (const LogEntryInfo& entry)
     * Once the predicate returns true, the loop will be terminated and the method will return an initialized lazy
     * contructor with the last visited entry; otherwise the constructor will not be initialized.
     * In this case, lazy constructor is used as boost::optional.
     * The log is always traversed from HIGH to LOW index values, i.e. entry 0 will be traversed last.
     */
    template <typename Predicate>
    inline LazyConstructor<LogEntryInfo> traverseLogFromEndUntil(const Predicate& predicate) const
    {
        UAVCAN_ASSERT(coerceOrFallback<bool>(predicate, true));
        for (int index = static_cast<int>(persistent_state_.getLog().getLastIndex()); index >= 0; index--)
        {
            const Entry* const entry = persistent_state_.getLog().getEntryAtIndex(Log::Index(index));
            UAVCAN_ASSERT(entry != UAVCAN_NULLPTR);
            const LogEntryInfo info(*entry, Log::Index(index) <= commit_index_);
            if (predicate(info))
            {
                LazyConstructor<LogEntryInfo> ret;
                ret.template construct<const LogEntryInfo&>(info);
                return ret;
            }
        }
        return LazyConstructor<LogEntryInfo>();
    }

    Log::Index getNumAllocations() const
    {
        // Remember that index zero contains a special-purpose entry that doesn't count as allocation
        return persistent_state_.getLog().getLastIndex();
    }

    /**
     * These accessors are needed for debugging, visualization and testing.
     */
    const PersistentState& getPersistentState() const { return persistent_state_; }
    const ClusterManager& getClusterManager() const { return cluster_; }
    MonotonicTime getLastActivityTimestamp() const { return last_activity_timestamp_; }
    ServerState getServerState() const { return server_state_; }
    MonotonicDuration getUpdateInterval() const { return getPeriod(); }
    MonotonicDuration getRandomizedTimeout() const { return randomized_activity_timeout_; }
};
}
}
}
#endif // Include guard
// ---- extraction artifact: unified-diff hunk header; marks the start of server.hpp ----
/*
* Copyright (C) 2015 Pavel Kirienko <pavel.kirienko@gmail.com>
*/
#ifndef UAVCAN_PROTOCOL_DYNAMIC_NODE_ID_SERVER_DISTRIBUTED_SERVER_HPP_INCLUDED
#define UAVCAN_PROTOCOL_DYNAMIC_NODE_ID_SERVER_DISTRIBUTED_SERVER_HPP_INCLUDED
#include <uavcan/build_config.hpp>
#include <uavcan/debug.hpp>
#include <uavcan/protocol/dynamic_node_id_server/distributed/types.hpp>
#include <uavcan/protocol/dynamic_node_id_server/distributed/raft_core.hpp>
#include <uavcan/protocol/dynamic_node_id_server/abstract_server.hpp>
#include <uavcan/protocol/dynamic_node_id_server/node_id_selector.hpp>
#include <uavcan/protocol/dynamic_node_id_server/event.hpp>
namespace uavcan
{
namespace dynamic_node_id_server
{
namespace distributed
{
/**
* This class implements the top-level allocation logic and server API.
*/
class UAVCAN_EXPORT Server : public AbstractServer
, IRaftLeaderMonitor
{
struct UniqueIDLogPredicate
{
const UniqueID unique_id;
UniqueIDLogPredicate(const UniqueID& uid)
: unique_id(uid)
{ }
bool operator()(const RaftCore::LogEntryInfo& info) const
{
return info.entry.unique_id == unique_id;
}
};
struct NodeIDLogPredicate
{
const NodeID node_id;
NodeIDLogPredicate(const NodeID& nid)
: node_id(nid)
{ }
bool operator()(const RaftCore::LogEntryInfo& info) const
{
return info.entry.node_id == node_id.get();
}
};
/*
* States
*/
RaftCore raft_core_;
/*
* Methods of IAllocationRequestHandler
*/
virtual bool canPublishFollowupAllocationResponse() const
{
/*
* The server is allowed to publish follow-up allocation responses only if both conditions are met:
* - The server is leader.
* - The last allocation request has been completed successfully.
*
* Why second condition? Imagine a case when there's two Raft nodes that don't hear each other - A and B,
* both of them are leaders (but only A can commit to the log, B is in a minor partition); then there's a
* client X that can exchange with both leaders, and a client Y that can exchange only with A. Such a
* situation can occur in case of a very unlikely failure of redundant interfaces.
*
* Both clients X and Y initially send a first-stage Allocation request; A responds to Y with a first-stage
* response, whereas B responds to X. Both X and Y will issue a follow-up second-stage requests, which may
* cause A to mix second-stage Allocation requests from different nodes, leading to reception of an invalid
* unique ID. When both leaders receive full unique IDs (A will receive an invalid one, B will receive a valid
* unique ID of X), only A will be able to make a commit, because B is in a minority. Since both clients were
* unable to receive node ID values in this round, they will try again later.
*
* Now, in order to prevent B from disrupting client-server communication second time around, we introduce this
* second restriction: the server cannot exchange with clients as long as its log contains uncommitted entries.
*
* Note that this restriction does not apply to allocation requests sent via CAN FD frames, as in this case
* no follow-up responses are necessary. So only CAN FD can offer reliable Allocation exchange.
*/
return raft_core_.isLeader() && raft_core_.areAllLogEntriesCommitted();
}
virtual void handleAllocationRequest(const UniqueID& unique_id, const NodeID preferred_node_id)
{
/*
* Note that it is possible that the local node is not leader. We will still perform the log search
* and try to find the node that requested allocation. If the node is found, response will be sent;
* otherwise the request will be ignored because only leader can add new allocations.
*/
const LazyConstructor<RaftCore::LogEntryInfo> result =
raft_core_.traverseLogFromEndUntil(UniqueIDLogPredicate(unique_id));
if (result.isConstructed())
{
if (result->committed)
{
tryPublishAllocationResult(result->entry);
UAVCAN_TRACE("dynamic_node_id_server::distributed::Server",
"Allocation request served with existing allocation; node ID %d",
int(result->entry.node_id));
}
else
{
UAVCAN_TRACE("dynamic_node_id_server::distributed::Server",
"Allocation request ignored - allocation exists but not committed yet; node ID %d",
int(result->entry.node_id));
}
}
else
{
if (raft_core_.isLeader() && !node_discoverer_.hasUnknownNodes())
{
allocateNewNode(unique_id, preferred_node_id);
}
}
}
/*
* Methods of INodeDiscoveryHandler
*/
// Discovery is restricted to the Raft leader (see INodeDiscoveryHandler docs: avoids bus congestion).
virtual bool canDiscoverNewNodes() const { return raft_core_.isLeader(); }
virtual NodeAwareness checkNodeAwareness(NodeID node_id) const
{
    // Full log traversal; per the interface contract the caller caches the result when possible.
    const LazyConstructor<RaftCore::LogEntryInfo> entry =
        raft_core_.traverseLogFromEndUntil(NodeIDLogPredicate(node_id));
    if (!entry.isConstructed())
    {
        return NodeAwarenessUnknown;
    }
    return entry->committed ? NodeAwarenessKnownAndCommitted : NodeAwarenessKnownButNotCommitted;
}
virtual void handleNewNodeDiscovery(const UniqueID* unique_id_or_null, NodeID node_id)
{
    // The discoverer is expected to have filtered out nodes that are already in the log.
    const bool already_in_log =
        raft_core_.traverseLogFromEndUntil(NodeIDLogPredicate(node_id)).isConstructed();
    if (already_in_log)
    {
        UAVCAN_ASSERT(0); // Such node is already known, the class that called this method should have known that
        return;
    }
    // A node that does not implement GetNodeInfo is recorded with a default-constructed unique ID.
    const UniqueID uid = (unique_id_or_null == UAVCAN_NULLPTR) ? UniqueID() : *unique_id_or_null;
    if (raft_core_.isLeader())
    {
        raft_core_.appendLog(uid, node_id);
    }
}
/*
* Methods of IRaftLeaderMonitor
*/
virtual void handleLogCommitOnLeader(const protocol::dynamic_node_id::server::Entry& entry)
{
    // Published unconditionally - whether or not any client actually requested this allocation.
    tryPublishAllocationResult(entry);
}
virtual void handleLocalLeadershipChange(bool local_node_is_leader)
{
if (!local_node_is_leader)
{
return;
}
const LazyConstructor<RaftCore::LogEntryInfo> result =
raft_core_.traverseLogFromEndUntil(NodeIDLogPredicate(node_.getNodeID()));
if (!result.isConstructed())
{
raft_core_.appendLog(getOwnUniqueID(), node_.getNodeID());
}
}
/*
* Private methods
*/
// Predicate for NodeIDSelector: a node ID is considered taken iff it appears anywhere in the
// Raft log (committed or not). Note the returned LazyConstructor converts to bool implicitly.
bool isNodeIDTaken(const NodeID node_id) const
{
    UAVCAN_TRACE("dynamic_node_id_server::distributed::Server",
                 "Testing if node ID %d is taken", int(node_id.get()));
    return raft_core_.traverseLogFromEndUntil(NodeIDLogPredicate(node_id));
}
void allocateNewNode(const UniqueID& unique_id, const NodeID preferred_node_id)
{
const NodeID allocated_node_id =
NodeIDSelector<Server>(this, &Server::isNodeIDTaken).findFreeNodeID(preferred_node_id);
if (!allocated_node_id.isUnicast())
{
UAVCAN_TRACE("dynamic_node_id_server::distributed::Server", "Request ignored - no free node ID left");
return;
}
UAVCAN_TRACE("dynamic_node_id_server::distributed::Server", "New node ID allocated: %d",
int(allocated_node_id.get()));
raft_core_.appendLog(unique_id, allocated_node_id);
}
void tryPublishAllocationResult(const protocol::dynamic_node_id::server::Entry& entry)
{
    // A failed broadcast is only recorded, not propagated - the client will retry on its own.
    const int broadcast_result =
        allocation_request_manager_.broadcastAllocationResponse(entry.unique_id, entry.node_id);
    if (broadcast_result >= 0)
    {
        return;
    }
    tracer_.onEvent(TraceError, broadcast_result);
    node_.registerInternalFailure("Dynamic allocation response");
}
public:
/**
 * @param node      Node instance used for all server communication.
 * @param storage   Stable storage backend holding the Raft persistent state.
 * @param tracer    Event sink for diagnostics (see IEventTracer).
 */
Server(INode& node,
       IStorageBackend& storage,
       IEventTracer& tracer)
    : AbstractServer(node, tracer)
    , raft_core_(node, storage, tracer, *this)
{ }
int init(const UniqueID& own_unique_id,
const uint8_t cluster_size = ClusterManager::ClusterSizeUnknown,
const TransferPriority priority = TransferPriority::OneHigherThanLowest)
{
/*
* Initializing Raft core first, because the next step requires Log to be loaded
*/
int res = raft_core_.init(cluster_size, priority);
if (res < 0)
{
return res;
}
/*
* Common logic
*/
res = AbstractServer::init(own_unique_id, priority);
if (res < 0)
{
return res;
}
/*
* Making sure that the server is started with the same node ID
*/
const LazyConstructor<RaftCore::LogEntryInfo> own_log_entry =
raft_core_.traverseLogFromEndUntil(NodeIDLogPredicate(node_.getNodeID()));
if (own_log_entry.isConstructed())
{
if (own_log_entry->entry.unique_id != getOwnUniqueID())
{
return -ErrInvalidConfiguration;
}
}
return 0;
}
/// Number of entries in the Raft log, i.e. number of known allocations.
Log::Index getNumAllocations() const { return raft_core_.getNumAllocations(); }
/**
 * These accessors are needed for debugging, visualization and testing.
 */
const RaftCore& getRaftCore() const { return raft_core_; }
};
/**
* This structure represents immediate state of the server.
* It can be used for state visualization and debugging.
*/
struct StateReport
{
    uint8_t cluster_size;                       ///< Configured number of servers in the Raft cluster
    RaftCore::ServerState state;                ///< Current Raft server state
    Log::Index last_log_index;                  ///< Index of the last entry in the log
    Log::Index commit_index;                    ///< Index of the last committed entry
    Term last_log_term;                         ///< Term of the last log entry (filled in the ctor body)
    Term current_term;                          ///< Current Raft term from persistent state
    NodeID voted_for;                           ///< Vote recorded in persistent state
    MonotonicTime last_activity_timestamp;      ///< Time of the last Raft activity
    MonotonicDuration randomized_timeout;       ///< Current randomized election timeout
    uint8_t num_unknown_nodes;                  ///< Nodes with pending GetNodeInfo queries
    // Per-remote-server replication progress; unused slots keep a default (invalid) node_id.
    struct FollowerState
    {
        NodeID node_id;
        Log::Index next_index;
        Log::Index match_index;
        FollowerState()
            : next_index(0)
            , match_index(0)
        { }
    } followers[ClusterManager::MaxClusterSize - 1];
    /// Takes a read-only snapshot of the given server's state.
    StateReport(const Server& s)
        : cluster_size (s.getRaftCore().getClusterManager().getClusterSize())
        , state (s.getRaftCore().getServerState())
        , last_log_index (s.getRaftCore().getPersistentState().getLog().getLastIndex())
        , commit_index (s.getRaftCore().getCommitIndex())
        , last_log_term (0) // See below
        , current_term (s.getRaftCore().getPersistentState().getCurrentTerm())
        , voted_for (s.getRaftCore().getPersistentState().getVotedFor())
        , last_activity_timestamp(s.getRaftCore().getLastActivityTimestamp())
        , randomized_timeout (s.getRaftCore().getRandomizedTimeout())
        , num_unknown_nodes (s.getNodeDiscoverer().getNumUnknownNodes())
    {
        // last_log_term needs the last log entry, which is only safe to fetch after
        // last_log_index has been initialized above.
        const Entry* const e = s.getRaftCore().getPersistentState().getLog().getEntryAtIndex(last_log_index);
        UAVCAN_ASSERT(e != UAVCAN_NULLPTR);
        if (e != UAVCAN_NULLPTR)
        {
            last_log_term = e->term;
        }
        // Collect replication progress for every known remote server (cluster minus self).
        for (uint8_t i = 0; i < (cluster_size - 1U); i++)
        {
            const ClusterManager& mgr = s.getRaftCore().getClusterManager();
            const NodeID node_id = mgr.getRemoteServerNodeIDAtIndex(i);
            if (node_id.isUnicast())
            {
                followers[i].node_id = node_id;
                followers[i].next_index = mgr.getServerNextIndex(node_id);
                followers[i].match_index = mgr.getServerMatchIndex(node_id);
            }
        }
    }
};
}
}
}
#endif // Include guard
@@ -0,0 +1,29 @@
/*
* Copyright (C) 2015 Pavel Kirienko <pavel.kirienko@gmail.com>
*/
#ifndef UAVCAN_PROTOCOL_DYNAMIC_NODE_ID_SERVER_DISTRIBUTED_TYPES_HPP_INCLUDED
#define UAVCAN_PROTOCOL_DYNAMIC_NODE_ID_SERVER_DISTRIBUTED_TYPES_HPP_INCLUDED
#include <uavcan/build_config.hpp>
#include <uavcan/protocol/dynamic_node_id_server/types.hpp>
namespace uavcan
{
namespace dynamic_node_id_server
{
namespace distributed
{
using namespace ::uavcan::protocol::dynamic_node_id::server;
/**
 * Raft term number; the storage type of the 'term' field of
 * uavcan.protocol.dynamic_node_id.server.Entry.
 */
typedef StorageType<Entry::FieldTypes::term>::Type Term;
}
}
}
#endif // Include guard
@@ -0,0 +1,176 @@
/*
* Copyright (C) 2015 Pavel Kirienko <pavel.kirienko@gmail.com>
*/
#ifndef UAVCAN_PROTOCOL_DYNAMIC_NODE_ID_SERVER_EVENT_HPP_INCLUDED
#define UAVCAN_PROTOCOL_DYNAMIC_NODE_ID_SERVER_EVENT_HPP_INCLUDED
#include <uavcan/build_config.hpp>
#include <uavcan/protocol/dynamic_node_id_server/types.hpp>
namespace uavcan
{
namespace dynamic_node_id_server
{
/**
* @ref IEventTracer.
* Event codes cannot be changed, only new ones can be added.
*/
// Numeric values are part of the external contract: existing codes must never be renumbered.
// Entries named TraceN (Trace0, Trace2, ...) are placeholders that keep later values stable.
enum UAVCAN_EXPORT TraceCode
{
    // Event name                          Argument
    // 0
    TraceError,                         // error code (may be negated)
    TraceRaftLogLastIndexRestored,      // recovered last index value
    TraceRaftLogAppend,                 // index of new entry
    TraceRaftLogRemove,                 // new last index value
    TraceRaftCurrentTermRestored,       // current term
    // 5
    TraceRaftCurrentTermUpdate,         // current term
    TraceRaftVotedForRestored,          // value of votedFor
    TraceRaftVotedForUpdate,            // value of votedFor
    TraceRaftDiscoveryBroadcast,        // number of known servers
    TraceRaftNewServerDiscovered,       // node ID of the new server
    // 10
    TraceRaftDiscoveryReceived,         // node ID of the sender
    TraceRaftClusterSizeInited,         // cluster size
    TraceRaftBadClusterSizeReceived,    // received cluster size
    TraceRaftCoreInited,                // update interval in usec
    TraceRaftStateSwitch,               // 0 - Follower, 1 - Candidate, 2 - Leader
    // 15
    Trace0,                             // placeholder
    TraceRaftNewLogEntry,               // node ID value
    TraceRaftRequestIgnored,            // node ID of the client
    TraceRaftVoteRequestReceived,       // node ID of the client
    TraceRaftVoteRequestSucceeded,      // node ID of the server
    // 20
    TraceRaftVoteRequestInitiation,     // node ID of the server
    TraceRaftPersistStateUpdateError,   // negative error code
    TraceRaftCommitIndexUpdate,         // new commit index value
    TraceRaftNewerTermInResponse,       // new term value
    TraceRaftNewEntryCommitted,         // new commit index value
    // 25
    TraceRaftAppendEntriesCallFailure,  // error code (may be negated)
    TraceRaftElectionComplete,          // number of votes collected
    TraceRaftAppendEntriesRespUnsucfl,  // node ID of the client
    Trace2,                             // placeholder
    Trace3,                             // placeholder
    // 30
    TraceAllocationFollowupResponse,    // number of unique ID bytes in this response
    TraceAllocationFollowupDenied,      // reason code (see sources for details)
    TraceAllocationFollowupTimeout,     // timeout value in microseconds
    TraceAllocationBadRequest,          // number of unique ID bytes in this request
    TraceAllocationUnexpectedStage,     // stage number in the request - 1, 2, or 3
    // 35
    TraceAllocationRequestAccepted,     // number of bytes of unique ID after request
    TraceAllocationExchangeComplete,    // first 8 bytes of unique ID interpreted as signed 64 bit big endian
    TraceAllocationResponse,            // allocated node ID
    TraceAllocationActivity,            // source node ID of the message
    Trace12,                            // placeholder
    // 40
    TraceDiscoveryNewNodeFound,         // node ID
    TraceDiscoveryCommitCacheUpdated,   // node ID marked as committed
    TraceDiscoveryNodeFinalized,        // node ID in lower 7 bits, bit 8 (256, 0x100) is set if unique ID is known
    TraceDiscoveryGetNodeInfoFailure,   // node ID
    TraceDiscoveryTimerStart,           // interval in microseconds
    // 45
    TraceDiscoveryTimerStop,            // reason code (see sources for details)
    TraceDiscoveryGetNodeInfoRequest,   // target node ID
    TraceDiscoveryNodeRestartDetected,  // node ID
    TraceDiscoveryNodeRemoved,          // node ID
    Trace22,                            // placeholder
    // 50
    NumTraceCodes
};
/**
* This interface allows the application to trace events that happen in the server.
*/
class UAVCAN_EXPORT IEventTracer
{
public:
#if UAVCAN_TOSTRING
    /**
     * It is safe to call this function with any argument.
     * If the event code is out of range, an assertion failure will be triggered and an error text will be returned.
     */
    static const char* getEventName(TraceCode code)
    {
        // This table must stay in one-to-one correspondence with TraceCode; the StaticAssert
        // below enforces the count. Placeholder codes map to empty strings.
        // import re;m = lambda s:',\n'.join('"%s"' % x for x in re.findall(r'\ {4}Trace[0-9]*([A-Za-z0-9]*),',s))
        static const char* const Strings[] =
        {
            "Error",
            "RaftLogLastIndexRestored",
            "RaftLogAppend",
            "RaftLogRemove",
            "RaftCurrentTermRestored",
            "RaftCurrentTermUpdate",
            "RaftVotedForRestored",
            "RaftVotedForUpdate",
            "RaftDiscoveryBroadcast",
            "RaftNewServerDiscovered",
            "RaftDiscoveryReceived",
            "RaftClusterSizeInited",
            "RaftBadClusterSizeReceived",
            "RaftCoreInited",
            "RaftStateSwitch",
            "",
            "RaftNewLogEntry",
            "RaftRequestIgnored",
            "RaftVoteRequestReceived",
            "RaftVoteRequestSucceeded",
            "RaftVoteRequestInitiation",
            "RaftPersistStateUpdateError",
            "RaftCommitIndexUpdate",
            "RaftNewerTermInResponse",
            "RaftNewEntryCommitted",
            "RaftAppendEntriesCallFailure",
            "RaftElectionComplete",
            "RaftAppendEntriesRespUnsucfl",
            "",
            "",
            "AllocationFollowupResponse",
            "AllocationFollowupDenied",
            "AllocationFollowupTimeout",
            "AllocationBadRequest",
            "AllocationUnexpectedStage",
            "AllocationRequestAccepted",
            "AllocationExchangeComplete",
            "AllocationResponse",
            "AllocationActivity",
            "",
            "DiscoveryNewNodeFound",
            "DiscoveryCommitCacheUpdated",
            "DiscoveryNodeFinalized",
            "DiscoveryGetNodeInfoFailure",
            "DiscoveryTimerStart",
            "DiscoveryTimerStop",
            "DiscoveryGetNodeInfoRequest",
            "DiscoveryNodeRestartDetected",
            "DiscoveryNodeRemoved",
            ""
        };
        uavcan::StaticAssert<sizeof(Strings) / sizeof(Strings[0]) == NumTraceCodes>::check();
        UAVCAN_ASSERT(code < NumTraceCodes);
        // coverity[dead_error_line]
        return (code < NumTraceCodes) ? Strings[static_cast<unsigned>(code)] : "INVALID_EVENT_CODE";
    }
#endif
    /**
     * The server invokes this method every time it believes that a noteworthy event has happened.
     * It is guaranteed that event code values will never change, but new ones can be added in future. This ensures
     * full backward compatibility.
     * @param event_code        Event code, see the sources for the enum with values.
     * @param event_argument    Value associated with the event; its meaning depends on the event code.
     */
    virtual void onEvent(TraceCode event_code, int64_t event_argument) = 0;
    virtual ~IEventTracer() { }
};
}
}
#endif // Include guard
@@ -0,0 +1,348 @@
/*
* Copyright (C) 2015 Pavel Kirienko <pavel.kirienko@gmail.com>
*/
#ifndef UAVCAN_PROTOCOL_DYNAMIC_NODE_ID_SERVER_NODE_DISCOVERER_HPP_INCLUDED
#define UAVCAN_PROTOCOL_DYNAMIC_NODE_ID_SERVER_NODE_DISCOVERER_HPP_INCLUDED
#include <uavcan/build_config.hpp>
#include <uavcan/util/map.hpp>
#include <uavcan/util/method_binder.hpp>
#include <uavcan/util/bitset.hpp>
#include <uavcan/node/timer.hpp>
#include <uavcan/node/subscriber.hpp>
#include <uavcan/node/service_client.hpp>
#include <uavcan/protocol/dynamic_node_id_server/types.hpp>
#include <uavcan/protocol/dynamic_node_id_server/event.hpp>
#include <cassert>
// UAVCAN types
#include <uavcan/protocol/NodeStatus.hpp>
#include <uavcan/protocol/GetNodeInfo.hpp>
namespace uavcan
{
namespace dynamic_node_id_server
{
/**
* The allocator must implement this interface.
*/
class INodeDiscoveryHandler
{
public:
    /**
     * In order to avoid bus congestion, normally only leader can discover new nodes.
     * @return True if this allocator is currently allowed to query unknown nodes.
     */
    virtual bool canDiscoverNewNodes() const = 0;
    /**
     * These values represent the level of awareness of a certain node by the server.
     */
    enum NodeAwareness
    {
        NodeAwarenessUnknown,               ///< Node does not appear in the log
        NodeAwarenessKnownButNotCommitted,  ///< Node is in the log but the entry is not committed yet
        NodeAwarenessKnownAndCommitted      ///< Node has a committed log entry
    };
    /**
     * It is OK to do full log traversal in this method, because the unique ID collector will cache the
     * result when possible.
     */
    virtual NodeAwareness checkNodeAwareness(NodeID node_id) const = 0;
    /**
     * This method will be called when a new node responds to GetNodeInfo request.
     * If this method fails to register the node, the node will be queried again later and this method will be
     * invoked again.
     * Unique ID will be UAVCAN_NULLPTR if the node is assumed to not implement the GetNodeInfo service.
     */
    virtual void handleNewNodeDiscovery(const UniqueID* unique_id_or_null, NodeID node_id) = 0;
    virtual ~INodeDiscoveryHandler() { }
};
/**
* This class listens to NodeStatus messages from other nodes and retrieves their unique ID if they are not
* known to the allocator.
*/
class NodeDiscoverer : TimerBase
{
    typedef MethodBinder<NodeDiscoverer*, void (NodeDiscoverer::*)(const ServiceCallResult<protocol::GetNodeInfo>&)>
        GetNodeInfoResponseCallback;
    typedef MethodBinder<NodeDiscoverer*, void (NodeDiscoverer::*)(const ReceivedDataStructure<protocol::NodeStatus>&)>
        NodeStatusCallback;
    // Per-node bookkeeping while the node is being queried.
    struct NodeData
    {
        uint32_t last_seen_uptime;           // Last uptime_sec seen in NodeStatus; used to detect restarts
        uint8_t num_get_node_info_attempts;  // Failed GetNodeInfo attempts so far
        NodeData()
            : last_seen_uptime(0)
            , num_get_node_info_attempts(0)
        { }
    };
    typedef Map<NodeID, NodeData> NodeMap;
    /**
     * When this number of attempts has been made, the discoverer will give up and assume that the node
     * does not implement this service.
     */
    enum { MaxAttemptsToGetNodeInfo = 5 };
    enum { TimerPollIntervalMs = 170 }; // ~ ceil(500 ms service timeout / 3)
    /*
     * States
     */
    INodeDiscoveryHandler& handler_;
    IEventTracer& tracer_;
    BitSet<NodeID::Max + 1> committed_node_mask_; ///< Nodes that are marked will not be queried
    NodeMap node_map_;
    ServiceClient<protocol::GetNodeInfo, GetNodeInfoResponseCallback> get_node_info_client_;
    Subscriber<protocol::NodeStatus, NodeStatusCallback> node_status_sub_;
    /*
     * Methods
     */
    void trace(TraceCode code, int64_t argument) { tracer_.onEvent(code, argument); }
    INode& getNode() { return node_status_sub_.getNode(); }
    // Drops the node from the pending-query map.
    void removeNode(const NodeID node_id)
    {
        node_map_.remove(node_id);
        trace(TraceDiscoveryNodeRemoved, node_id.get());
    }
    NodeID pickNextNodeToQuery() const
    {
        // This essentially means that we pick first available node. Remember that the map is unordered.
        const NodeMap::KVPair* const pair = node_map_.getByIndex(0);
        return (pair == UAVCAN_NULLPTR) ? NodeID() : pair->key;
    }
    // Returns true iff the node still needs a GetNodeInfo query.
    // Side effects: may update committed_node_mask_ and remove the node from node_map_.
    bool needToQuery(NodeID node_id)
    {
        UAVCAN_ASSERT(node_id.isUnicast());
        /*
         * Fast check
         */
        if (committed_node_mask_[node_id.get()])
        {
            return false;
        }
        /*
         * Slow check - may involve full log search
         */
        const INodeDiscoveryHandler::NodeAwareness awareness = handler_.checkNodeAwareness(node_id);
        if (awareness == INodeDiscoveryHandler::NodeAwarenessUnknown)
        {
            return true;
        }
        else if (awareness == INodeDiscoveryHandler::NodeAwarenessKnownButNotCommitted)
        {
            // Known but uncommitted: stop querying, but do not cache - it may become unknown again.
            removeNode(node_id);
            return false;
        }
        else if (awareness == INodeDiscoveryHandler::NodeAwarenessKnownAndCommitted)
        {
            // Committed entries are final - cache the result so the slow path is skipped next time.
            trace(TraceDiscoveryCommitCacheUpdated, node_id.get());
            committed_node_mask_[node_id.get()] = true;
            removeNode(node_id);
            return false;
        }
        else
        {
            UAVCAN_ASSERT(0);
            return false;
        }
    }
    // Picks the first node that still needs querying, removing stale entries along the way.
    NodeID pickNextNodeToQueryAndCleanupMap()
    {
        NodeID node_id;
        do
        {
            node_id = pickNextNodeToQuery();
            if (node_id.isUnicast())
            {
                if (needToQuery(node_id))
                {
                    return node_id;
                }
                else
                {
                    removeNode(node_id);
                }
            }
        }
        while (node_id.isUnicast());
        return NodeID();
    }
    // Reports the node to the handler (unique ID may be null) and forgets it.
    void finalizeNodeDiscovery(const UniqueID* unique_id_or_null, NodeID node_id)
    {
        trace(TraceDiscoveryNodeFinalized, node_id.get() | ((unique_id_or_null == UAVCAN_NULLPTR) ? 0U : 0x100U));
        removeNode(node_id);
        /*
         * It is paramount to check if the server is still interested to receive this data.
         * Otherwise, if the node appeared in the log while we were waiting for response, we'd end up with
         * duplicate node ID in the log.
         */
        if (needToQuery(node_id))
        {
            handler_.handleNewNodeDiscovery(unique_id_or_null, node_id);
        }
    }
    void handleGetNodeInfoResponse(const ServiceCallResult<protocol::GetNodeInfo>& result)
    {
        if (result.isSuccessful())
        {
            UAVCAN_TRACE("dynamic_node_id_server::NodeDiscoverer", "GetNodeInfo response from %d",
                         int(result.getCallID().server_node_id.get()));
            finalizeNodeDiscovery(&result.getResponse().hardware_version.unique_id, result.getCallID().server_node_id);
        }
        else
        {
            trace(TraceDiscoveryGetNodeInfoFailure, result.getCallID().server_node_id.get());
            NodeData* const data = node_map_.access(result.getCallID().server_node_id);
            if (data == UAVCAN_NULLPTR)
            {
                return; // Probably it is a known node now
            }
            UAVCAN_TRACE("dynamic_node_id_server::NodeDiscoverer",
                         "GetNodeInfo request to %d has timed out, %d attempts",
                         int(result.getCallID().server_node_id.get()), int(data->num_get_node_info_attempts));
            data->num_get_node_info_attempts++;
            if (data->num_get_node_info_attempts >= MaxAttemptsToGetNodeInfo)
            {
                // Too many failures: assume the node does not implement GetNodeInfo.
                finalizeNodeDiscovery(UAVCAN_NULLPTR, result.getCallID().server_node_id);
                // Warning: data pointer is invalidated now
            }
        }
    }
    // Periodic poll: issues at most one GetNodeInfo request at a time.
    void handleTimerEvent(const TimerEvent&) override
    {
        if (get_node_info_client_.hasPendingCalls())
        {
            return;
        }
        const NodeID node_id = pickNextNodeToQueryAndCleanupMap();
        if (!node_id.isUnicast())
        {
            // Nothing left to query - stop polling until a new NodeStatus arrives.
            trace(TraceDiscoveryTimerStop, 0);
            stop();
            return;
        }
        if (!handler_.canDiscoverNewNodes())
        {
            return; // Timer must continue to run in order to not get stuck when it unlocks
        }
        trace(TraceDiscoveryGetNodeInfoRequest, node_id.get());
        UAVCAN_TRACE("dynamic_node_id_server::NodeDiscoverer", "Requesting GetNodeInfo from node %d",
                     int(node_id.get()));
        const int res = get_node_info_client_.call(node_id, protocol::GetNodeInfo::Request());
        if (res < 0)
        {
            getNode().registerInternalFailure("NodeDiscoverer GetNodeInfo call");
        }
    }
    // NodeStatus listener: registers unknown nodes and (re)starts the poll timer.
    void handleNodeStatus(const ReceivedDataStructure<protocol::NodeStatus>& msg)
    {
        if (!needToQuery(msg.getSrcNodeID()))
        {
            return;
        }
        NodeData* data = node_map_.access(msg.getSrcNodeID());
        if (data == UAVCAN_NULLPTR)
        {
            trace(TraceDiscoveryNewNodeFound, msg.getSrcNodeID().get());
            data = node_map_.insert(msg.getSrcNodeID(), NodeData());
            if (data == UAVCAN_NULLPTR)
            {
                getNode().registerInternalFailure("NodeDiscoverer OOM");
                return;
            }
        }
        UAVCAN_ASSERT(data != UAVCAN_NULLPTR);
        // Uptime going backwards means the node restarted - give GetNodeInfo a fresh set of attempts.
        if (msg.uptime_sec < data->last_seen_uptime)
        {
            trace(TraceDiscoveryNodeRestartDetected, msg.getSrcNodeID().get());
            data->num_get_node_info_attempts = 0;
        }
        data->last_seen_uptime = msg.uptime_sec;
        if (!isRunning())
        {
            startPeriodic(MonotonicDuration::fromMSec(TimerPollIntervalMs));
            trace(TraceDiscoveryTimerStart, getPeriod().toUSec());
        }
    }
public:
    NodeDiscoverer(INode& node, IEventTracer& tracer, INodeDiscoveryHandler& handler)
        : TimerBase(node)
        , handler_(handler)
        , tracer_(tracer)
        , node_map_(node.getAllocator())
        , get_node_info_client_(node)
        , node_status_sub_(node)
    { }
    /**
     * Starts the GetNodeInfo client and the NodeStatus subscriber.
     * @param priority  Transfer priority for GetNodeInfo requests.
     * @return Zero on success, negative error code on failure.
     */
    int init(const TransferPriority priority)
    {
        int res = get_node_info_client_.init(priority);
        if (res < 0)
        {
            return res;
        }
        get_node_info_client_.setCallback(
            GetNodeInfoResponseCallback(this, &NodeDiscoverer::handleGetNodeInfoResponse));
        res = node_status_sub_.start(NodeStatusCallback(this, &NodeDiscoverer::handleNodeStatus));
        if (res < 0)
        {
            return res;
        }
        // Note: the timer starts ad-hoc from the node status callback, not here.
        return 0;
    }
    /**
     * Returns true if there's at least one node with pending GetNodeInfo.
     */
    bool hasUnknownNodes() const { return !node_map_.isEmpty(); }
    /**
     * Returns number of nodes that are being queried at the moment.
     * This method is needed for testing and state visualization.
     */
    uint8_t getNumUnknownNodes() const { return static_cast<uint8_t>(node_map_.getSize()); }
};
}
}
#endif // Include guard
@@ -0,0 +1,71 @@
/*
* Copyright (C) 2015 Pavel Kirienko <pavel.kirienko@gmail.com>
*/
#ifndef UAVCAN_PROTOCOL_DYNAMIC_NODE_ID_SERVER_NODE_ID_SELECTOR_HPP_INCLUDED
#define UAVCAN_PROTOCOL_DYNAMIC_NODE_ID_SERVER_NODE_ID_SELECTOR_HPP_INCLUDED
#include <uavcan/build_config.hpp>
#include <cassert>
namespace uavcan
{
namespace dynamic_node_id_server
{
/**
* Node ID allocation logic
*/
template <typename Owner>
class NodeIDSelector
{
typedef bool (Owner::*IsNodeIDTakenMethod)(const NodeID node_id) const;
const Owner* const owner_;
const IsNodeIDTakenMethod is_node_id_taken_;
public:
NodeIDSelector(const Owner* owner, IsNodeIDTakenMethod is_node_id_taken)
: owner_(owner)
, is_node_id_taken_(is_node_id_taken)
{
UAVCAN_ASSERT(owner_ != UAVCAN_NULLPTR);
UAVCAN_ASSERT(is_node_id_taken_ != UAVCAN_NULLPTR);
}
/**
* Reutrns a default-constructed (invalid) node ID if a free one could not be found.
*/
NodeID findFreeNodeID(const NodeID preferred) const
{
uint8_t candidate = preferred.isUnicast() ? preferred.get() : NodeID::MaxRecommendedForRegularNodes;
// Up
while (candidate <= NodeID::MaxRecommendedForRegularNodes)
{
if (!(owner_->*is_node_id_taken_)(candidate))
{
return candidate;
}
candidate++;
}
candidate = preferred.isUnicast() ? preferred.get() : NodeID::MaxRecommendedForRegularNodes;
// Down
while (candidate > 0)
{
if (!(owner_->*is_node_id_taken_)(candidate))
{
return candidate;
}
candidate--;
}
return NodeID();
}
};
}
}
#endif // Include guard
@@ -0,0 +1,62 @@
/*
* Copyright (C) 2015 Pavel Kirienko <pavel.kirienko@gmail.com>
*/
#ifndef UAVCAN_PROTOCOL_DYNAMIC_NODE_ID_SERVER_STORAGE_BACKEND_HPP_INCLUDED
#define UAVCAN_PROTOCOL_DYNAMIC_NODE_ID_SERVER_STORAGE_BACKEND_HPP_INCLUDED
#include <uavcan/build_config.hpp>
#include <uavcan/marshal/types.hpp>
namespace uavcan
{
namespace dynamic_node_id_server
{
/**
* This interface is used by the server to read and write stable storage.
* The storage is represented as a key-value container, where keys and values are ASCII strings up to 32
* characters long, not including the termination byte. Fixed block size allows for absolutely straightforward
* and efficient implementation of storage backends, e.g. based on text files.
* Keys and values may contain only non-whitespace, non-formatting printable characters.
*/
class UAVCAN_EXPORT IStorageBackend
{
public:
    /**
     * Maximum length of keys and values. One pair takes twice as much space.
     */
    enum { MaxStringLength = 32 };
    /**
     * It is guaranteed that the server will never require more than this number of key/value pairs.
     * Total storage space needed is (MaxKeyValuePairs * MaxStringLength * 2), not including storage overhead.
     */
    enum { MaxKeyValuePairs = 512 };
    /**
     * This type is used to exchange data chunks with the backend.
     * It doesn't use any dynamic memory; please refer to the Array<> class for details.
     */
    typedef MakeString<MaxStringLength>::Type String;
    /**
     * Read one value from the storage.
     * If such key does not exist, or if read failed, an empty string will be returned.
     * This method should not block for more than 50 ms.
     * @param key  Key to look up.
     * @return The stored value, or an empty string on any failure.
     */
    virtual String get(const String& key) const = 0;
    /**
     * Create or update value for the given key. Empty value should be regarded as a request to delete the key.
     * This method should not block for more than 50 ms.
     * Failures will be ignored.
     * @param key    Key to create or update.
     * @param value  New value; empty requests deletion of the key.
     */
    virtual void set(const String& key, const String& value) = 0;
    virtual ~IStorageBackend() { }
};
}
}
#endif // Include guard
@@ -0,0 +1,190 @@
/*
* Copyright (C) 2015 Pavel Kirienko <pavel.kirienko@gmail.com>
*/
#ifndef UAVCAN_PROTOCOL_DYNAMIC_NODE_ID_SERVER_STORAGE_MARSHALLER_HPP_INCLUDED
#define UAVCAN_PROTOCOL_DYNAMIC_NODE_ID_SERVER_STORAGE_MARSHALLER_HPP_INCLUDED
#include <uavcan/build_config.hpp>
#include <uavcan/debug.hpp>
#include <uavcan/protocol/dynamic_node_id_server/storage_backend.hpp>
#include <uavcan/protocol/dynamic_node_id_server/types.hpp>
#include <cstdlib>
#if UAVCAN_CPP_VERSION >= UAVCAN_CPP11
# include <cerrno>
#endif
namespace uavcan
{
namespace dynamic_node_id_server
{
/**
* This class extends the storage backend interface with serialization/deserialization functionality.
*/
class StorageMarshaller
{
    IStorageBackend& storage_;
    // Converts one lower-case hex digit ('0'-'9', 'a'-'f') to its numeric value.
    // Input is expected to be pre-validated; no error reporting here.
    static uint8_t convertLowerCaseHexCharToNibble(char ch)
    {
        const uint8_t ret = (ch > '9') ? static_cast<uint8_t>(ch - 'a' + 10) : static_cast<uint8_t>(ch - '0');
        UAVCAN_ASSERT(ret < 16);
        return ret;
    }
public:
    StorageMarshaller(IStorageBackend& storage)
        : storage_(storage)
    { }
    /**
     * Turns a unique ID into a hex string that can be used as a key or as a value.
     */
    static IStorageBackend::String convertUniqueIDToHex(const UniqueID& key)
    {
        IStorageBackend::String serialized;
        for (uint8_t i = 0; i < UniqueID::MaxSize; i++)
        {
            serialized.appendFormatted("%02x", key.at(i));   // Two lower-case hex chars per byte
        }
        UAVCAN_ASSERT(serialized.size() == UniqueID::MaxSize * 2);
        return serialized;
    }
    /**
     * These methods set the value and then immediately read it back.
     *  1. Serialize the value.
     *  2. Update the value on the backend.
     *  3. Call get() with the same value argument.
     * The caller then is supposed to check whether the argument has the desired value.
     * @return Zero on success, negative error code if the read-back failed.
     */
    int setAndGetBack(const IStorageBackend::String& key, uint32_t& inout_value)
    {
        IStorageBackend::String serialized;
        serialized.appendFormatted("%lu", inout_value);
        UAVCAN_TRACE("StorageMarshaller", "Set %s = %s", key.c_str(), serialized.c_str());
        storage_.set(key, serialized);
        return get(key, inout_value);
    }
    int setAndGetBack(const IStorageBackend::String& key, UniqueID& inout_value)
    {
        const IStorageBackend::String serialized = convertUniqueIDToHex(inout_value);
        UAVCAN_TRACE("StorageMarshaller", "Set %s = %s", key.c_str(), serialized.c_str());
        storage_.set(key, serialized);
        return get(key, inout_value);
    }
    /**
     * Getters simply read and deserialize the value.
     *  1. Read the value back from the backend; return false if read fails.
     *  2. Deserialize the newly read value; return false if deserialization fails.
     *  3. Update the argument with deserialized value.
     *  4. Return true.
     * @return Zero on success, -ErrFailure on a read or parse failure (out_value is untouched then).
     */
    int get(const IStorageBackend::String& key, uint32_t& out_value) const
    {
        /*
         * Reading the storage
         */
        const IStorageBackend::String val = storage_.get(key);
        if (val.empty())
        {
            return -ErrFailure;
        }
        /*
         * Per MISRA C++ recommendations, checking the inputs instead of relying solely on errno.
         * The value must contain only numeric characters.
         */
        for (IStorageBackend::String::const_iterator it = val.begin(); it != val.end(); ++it)
        {
            if (static_cast<char>(*it) < '0' || static_cast<char>(*it) > '9')
            {
                return -ErrFailure;
            }
        }
        if (val.size() > 10) // len(str(0xFFFFFFFF))
        {
            return -ErrFailure;
        }
        /*
         * Conversion is carried out here
         */
#if UAVCAN_CPP_VERSION >= UAVCAN_CPP11
        errno = 0;
#endif
#if UAVCAN_CPP_VERSION >= UAVCAN_CPP11
        const unsigned long long x = std::strtoull(val.c_str(), UAVCAN_NULLPTR, 10);
#else
        // There was no strtoull() before C++11, so we need to resort to strtoul()
        StaticAssert<(sizeof(unsigned long) >= sizeof(uint32_t))>::check();
        const unsigned long x = std::strtoul(val.c_str(), UAVCAN_NULLPTR, 10);
#endif
#if UAVCAN_CPP_VERSION >= UAVCAN_CPP11
        if (errno != 0)
        {
            return -ErrFailure;
        }
#endif
        out_value = static_cast<uint32_t>(x);
        return 0;
    }
    int get(const IStorageBackend::String& key, UniqueID& out_value) const
    {
        static const uint8_t NumBytes = UniqueID::MaxSize;
        /*
         * Reading the storage
         */
        IStorageBackend::String val = storage_.get(key);
        if (val.size() != NumBytes * 2)     // Exactly two hex chars per byte, no separators
        {
            return -ErrFailure;
        }
        /*
         * The value must contain only hexadecimal numbers.
         */
        val.convertToLowerCaseASCII();      // Accept upper-case input by normalizing first
        for (IStorageBackend::String::const_iterator it = val.begin(); it != val.end(); ++it)
        {
            if ((static_cast<char>(*it) < '0' || static_cast<char>(*it) > '9') &&
                (static_cast<char>(*it) < 'a' || static_cast<char>(*it) > 'f'))
            {
                return -ErrFailure;
            }
        }
        /*
         * Conversion is carried out here
         */
        IStorageBackend::String::const_iterator it = val.begin();
        for (uint8_t byte_index = 0; byte_index < NumBytes; byte_index++)
        {
            // High nibble first, then low nibble OR-ed in.
            out_value[byte_index] =
                static_cast<uint8_t>(convertLowerCaseHexCharToNibble(static_cast<char>(*it++)) << 4);
            out_value[byte_index] =
                static_cast<uint8_t>(convertLowerCaseHexCharToNibble(static_cast<char>(*it++)) | out_value[byte_index]);
        }
        return 0;
    }
};
}
}
#endif // Include guard
@@ -0,0 +1,27 @@
/*
* Copyright (C) 2015 Pavel Kirienko <pavel.kirienko@gmail.com>
*/
#ifndef UAVCAN_PROTOCOL_DYNAMIC_NODE_ID_SERVER_TYPES_HPP_INCLUDED
#define UAVCAN_PROTOCOL_DYNAMIC_NODE_ID_SERVER_TYPES_HPP_INCLUDED
#include <uavcan/build_config.hpp>
// UAVCAN types
#include <uavcan/protocol/dynamic_node_id/server/Entry.hpp>
namespace uavcan
{
namespace dynamic_node_id_server
{
using namespace ::uavcan::protocol::dynamic_node_id;
/**
 * Node unique ID: the array type of the 'unique_id' field of
 * uavcan.protocol.dynamic_node_id.server.Entry.
 */
typedef protocol::dynamic_node_id::server::Entry::FieldTypes::unique_id UniqueID;
}
}
#endif // Include guard
@@ -0,0 +1,321 @@
/*
* Copyright (C) 2015 Pavel Kirienko <pavel.kirienko@gmail.com>
*/
#ifndef UAVCAN_PROTOCOL_FILE_SERVER_HPP_INCLUDED
#define UAVCAN_PROTOCOL_FILE_SERVER_HPP_INCLUDED
#include <uavcan/build_config.hpp>
#include <uavcan/debug.hpp>
#include <uavcan/node/service_server.hpp>
#include <uavcan/util/method_binder.hpp>
// UAVCAN types
#include <uavcan/protocol/file/GetInfo.hpp>
#include <uavcan/protocol/file/GetDirectoryEntryInfo.hpp>
#include <uavcan/protocol/file/Read.hpp>
#include <uavcan/protocol/file/Write.hpp>
#include <uavcan/protocol/file/Delete.hpp>
namespace uavcan
{
/**
* The file server backend should implement this interface.
* Note that error codes returned by these methods are defined in uavcan.protocol.file.Error; these are
* not the same as libuavcan-internal error codes defined in uavcan.error.hpp.
*/
class UAVCAN_EXPORT IFileServerBackend
{
public:
typedef protocol::file::Path::FieldTypes::path Path;
typedef protocol::file::EntryType EntryType;
typedef protocol::file::Error Error;
// Base directory prepended to requested paths; set via setRootPath().
IFileServerBackend::Path root_path_;
// Alternative base directory; set via setAltRootPath().
IFileServerBackend::Path alt_root_path_;
/**
 * All read operations must return this number of bytes, unless end of file is reached.
 */
enum { ReadSize = protocol::file::Read::Response::FieldTypes::data::MaxSize };
/**
 * Shortcut for uavcan.protocol.file.Path.SEPARATOR.
 */
static char getPathSeparator() { return static_cast<char>(protocol::file::Path::SEPARATOR); }
/**
* Set a base path to the files.
*/
void setRootPath(const char * path)
{
if (path)
{
root_path_.clear();
root_path_ = path;
if (root_path_.back() != getPathSeparator())
{
root_path_.push_back(getPathSeparator());
}
}
}
void setAltRootPath(const char * path)
{
if (path)
{
alt_root_path_.clear();
alt_root_path_ = path;
if (alt_root_path_.back() != getPathSeparator())
{
alt_root_path_.push_back(getPathSeparator());
}
}
}
/**
* Get a base path to the files.
*/
Path& getRootPath()
{
return root_path_;
}
/**
* Get a base path to the files.
*/
Path& getAltRootPath()
{
return alt_root_path_;
}
/**
* Backend for uavcan.protocol.file.GetInfo.
* Refer to uavcan.protocol.file.EntryType for the list of available bit flags.
* Implementation of this method is required.
* On success the method must return zero.
*/
virtual int16_t getInfo(const Path& path, uint64_t& out_size, EntryType& out_type) = 0;
/**
* Backend for uavcan.protocol.file.Read.
* Implementation of this method is required.
* @ref inout_size is set to @ref ReadSize; read operation is required to return exactly this amount, except
* if the end of file is reached.
* On success the method must return zero.
*/
virtual int16_t read(const Path& path, const uint64_t offset, uint8_t* out_buffer, uint16_t& inout_size) = 0;
// Methods below are optional.
/**
* Backend for uavcan.protocol.file.Write.
* Implementation of this method is NOT required; by default it returns uavcan.protocol.file.Error.NOT_IMPLEMENTED.
* On success the method must return zero.
*/
virtual int16_t write(const Path& path, const uint64_t offset, const uint8_t* buffer, const uint16_t size)
{
(void)path;
(void)offset;
(void)buffer;
(void)size;
return Error::NOT_IMPLEMENTED;
}
/**
* Backend for uavcan.protocol.file.Delete. ('delete' is a C++ keyword, so 'remove' is used instead)
* Implementation of this method is NOT required; by default it returns uavcan.protocol.file.Error.NOT_IMPLEMENTED.
* On success the method must return zero.
*/
virtual int16_t remove(const Path& path)
{
(void)path;
return Error::NOT_IMPLEMENTED;
}
/**
* Backend for uavcan.protocol.file.GetDirectoryEntryInfo.
* Refer to uavcan.protocol.file.EntryType for the list of available bit flags.
* Implementation of this method is NOT required; by default it returns uavcan.protocol.file.Error.NOT_IMPLEMENTED.
* On success the method must return zero.
*/
virtual int16_t getDirectoryEntryInfo(const Path& directory_path, const uint32_t entry_index,
EntryType& out_type, Path& out_entry_full_path)
{
(void)directory_path;
(void)entry_index;
(void)out_type;
(void)out_entry_full_path;
return Error::NOT_IMPLEMENTED;
}
virtual ~IFileServerBackend() { }
};
/**
* Basic file server implements only the following services:
* uavcan.protocol.file.GetInfo
* uavcan.protocol.file.Read
* Also see @ref IFileServerBackend.
*/
class BasicFileServer
{
typedef MethodBinder<BasicFileServer*,
void (BasicFileServer::*)(const protocol::file::GetInfo::Request&, protocol::file::GetInfo::Response&)>
GetInfoCallback;
typedef MethodBinder<BasicFileServer*,
void (BasicFileServer::*)(const protocol::file::Read::Request&, protocol::file::Read::Response&)>
ReadCallback;
ServiceServer<protocol::file::GetInfo, GetInfoCallback> get_info_srv_;
ServiceServer<protocol::file::Read, ReadCallback> read_srv_;
void handleGetInfo(const protocol::file::GetInfo::Request& req, protocol::file::GetInfo::Response& resp)
{
resp.error.value = backend_.getInfo(req.path.path, resp.size, resp.entry_type);
}
void handleRead(const protocol::file::Read::Request& req, protocol::file::Read::Response& resp)
{
uint16_t inout_size = resp.data.capacity();
resp.data.resize(inout_size);
resp.error.value = backend_.read(req.path.path, req.offset, resp.data.begin(), inout_size);
if (resp.error.value != protocol::file::Error::OK)
{
inout_size = 0;
}
if (inout_size > resp.data.capacity())
{
UAVCAN_ASSERT(0);
resp.error.value = protocol::file::Error::UNKNOWN_ERROR;
}
else
{
resp.data.resize(inout_size);
}
}
protected:
IFileServerBackend& backend_; ///< Derived types can use it
public:
BasicFileServer(INode& node, IFileServerBackend& backend)
: get_info_srv_(node)
, read_srv_(node)
, backend_(backend)
{ }
int start(const char* server_root = UAVCAN_NULLPTR, const char* server_alt_root = UAVCAN_NULLPTR)
{
backend_.setRootPath(server_root);
backend_.setAltRootPath(server_alt_root);
int res = get_info_srv_.start(GetInfoCallback(this, &BasicFileServer::handleGetInfo));
if (res < 0)
{
return res;
}
res = read_srv_.start(ReadCallback(this, &BasicFileServer::handleRead));
if (res < 0)
{
return res;
}
return 0;
}
};
/**
* Full file server implements all file services:
* uavcan.protocol.file.GetInfo
* uavcan.protocol.file.Read
* uavcan.protocol.file.Write
* uavcan.protocol.file.Delete
* uavcan.protocol.file.GetDirectoryEntryInfo
* Also see @ref IFileServerBackend.
*/
class FileServer : protected BasicFileServer
{
    typedef MethodBinder<FileServer*,
        void (FileServer::*)(const protocol::file::Write::Request&, protocol::file::Write::Response&)>
        WriteCallback;

    typedef MethodBinder<FileServer*,
        void (FileServer::*)(const protocol::file::Delete::Request&, protocol::file::Delete::Response&)>
        DeleteCallback;

    typedef MethodBinder<FileServer*,
        void (FileServer::*)(const protocol::file::GetDirectoryEntryInfo::Request&,
                             protocol::file::GetDirectoryEntryInfo::Response&)>
        GetDirectoryEntryInfoCallback;

    ServiceServer<protocol::file::Write, WriteCallback> write_srv_;
    ServiceServer<protocol::file::Delete, DeleteCallback> delete_srv_;
    ServiceServer<protocol::file::GetDirectoryEntryInfo, GetDirectoryEntryInfoCallback> get_directory_entry_info_srv_;

    // The handlers below relay each request to the backend and report its error code verbatim.
    void handleWrite(const protocol::file::Write::Request& req, protocol::file::Write::Response& resp)
    {
        resp.error.value = backend_.write(req.path.path, req.offset, req.data.begin(), req.data.size());
    }

    void handleDelete(const protocol::file::Delete::Request& req, protocol::file::Delete::Response& resp)
    {
        resp.error.value = backend_.remove(req.path.path);
    }

    void handleGetDirectoryEntryInfo(const protocol::file::GetDirectoryEntryInfo::Request& req,
                                     protocol::file::GetDirectoryEntryInfo::Response& resp)
    {
        resp.error.value = backend_.getDirectoryEntryInfo(req.directory_path.path, req.entry_index,
                                                          resp.entry_type, resp.entry_full_path.path);
    }

public:
    FileServer(INode& node, IFileServerBackend& backend)
        : BasicFileServer(node, backend)
        , write_srv_(node)
        , delete_srv_(node)
        , get_directory_entry_info_srv_(node)
    { }

    /**
     * Starts the basic services (GetInfo, Read) first, then Write, Delete
     * and GetDirectoryEntryInfo. Stops at the first failure.
     * Returns zero on success, negative error code otherwise.
     */
    int start()
    {
        int res = BasicFileServer::start();
        if (res >= 0)
        {
            res = write_srv_.start(WriteCallback(this, &FileServer::handleWrite));
        }
        if (res >= 0)
        {
            res = delete_srv_.start(DeleteCallback(this, &FileServer::handleDelete));
        }
        if (res >= 0)
        {
            res = get_directory_entry_info_srv_.start(
                GetDirectoryEntryInfoCallback(this, &FileServer::handleGetDirectoryEntryInfo));
        }
        return (res < 0) ? res : 0;
    }
};
}
#endif // Include guard
@@ -0,0 +1,472 @@
/*
* Copyright (C) 2015 Pavel Kirienko <pavel.kirienko@gmail.com>
*/
#ifndef UAVCAN_PROTOCOL_FIRMWARE_UPDATE_TRIGGER_HPP_INCLUDED
#define UAVCAN_PROTOCOL_FIRMWARE_UPDATE_TRIGGER_HPP_INCLUDED
#include <uavcan/build_config.hpp>
#include <uavcan/debug.hpp>
#include <uavcan/node/service_client.hpp>
#include <uavcan/util/method_binder.hpp>
#include <uavcan/util/map.hpp>
#include <uavcan/protocol/node_info_retriever.hpp>
// UAVCAN types
#include <uavcan/protocol/file/BeginFirmwareUpdate.hpp>
namespace uavcan
{
/**
* Application-specific firmware version checking logic.
* Refer to @ref FirmwareUpdateTrigger for details.
*/
class IFirmwareVersionChecker
{
public:
/**
 * This value is limited by the pool block size minus some extra data. Please refer to the Map<> documentation
 * for details. If this size is set too high, the compilation will fail in the Map<> template.
 */
enum { MaxFirmwareFilePathLength = 40 };
/**
 * This type is used to store path to firmware file that the target node will retrieve using the
 * service uavcan.protocol.file.Read. Note that the maximum length is limited due to some specifics of
 * libuavcan (@ref MaxFirmwareFilePathLength), this is NOT a protocol-level limitation.
 */
typedef MakeString<MaxFirmwareFilePathLength>::Type FirmwareFilePath;
/**
 * This method will be invoked when the class obtains a response to GetNodeInfo request.
 *
 * @param node_id Node ID that this GetNodeInfo response was received from.
 *
 * @param node_info Actual node info structure; refer to uavcan.protocol.GetNodeInfo for details.
 *
 * @param out_firmware_file_path The implementation should return the firmware image path via this argument.
 *                               Note that this path must be reachable via uavcan.protocol.file.Read service.
 *                               Refer to @ref FileServer and @ref BasicFileServer for details.
 *
 * @return True - the class will begin sending update requests.
 *         False - the node will be ignored, no request will be sent.
 */
virtual bool shouldRequestFirmwareUpdate(NodeID node_id, const protocol::GetNodeInfo::Response& node_info,
FirmwareFilePath& out_firmware_file_path) = 0;
/**
 * This method will be invoked when a node responds to the update request with an error. If the request simply
 * times out, this method will not be invoked.
 * Note that if by the time of arrival of the response the node is already removed, this method will not be called.
 *
 * SPECIAL CASE: If the node responds with ERROR_IN_PROGRESS, the class will assume that further requesting
 *               is not needed anymore. This method will not be invoked.
 *
 * @param node_id Node ID that returned this error.
 *
 * @param error_response Contents of the error response. It contains error code and text.
 *
 * @param out_firmware_file_path New firmware path if a retry is needed. Note that this argument will be
 *                               initialized with old path, so if the same path needs to be reused, this
 *                               argument should be left unchanged.
 *
 * @return True - the class will continue sending update requests with new firmware path.
 *         False - the node will be forgotten, new requests will not be sent.
 */
virtual bool shouldRetryFirmwareUpdate(NodeID node_id,
const protocol::file::BeginFirmwareUpdate::Response& error_response,
FirmwareFilePath& out_firmware_file_path) = 0;
/**
 * This method is invoked when the node responds to the update request with confirmation.
 * Note that if by the time of arrival of the response the node is already removed, this method will not be called.
 *
 * Implementation is optional; default one does nothing.
 *
 * @param node_id Node ID that confirmed the request.
 *
 * @param response Actual response.
 */
virtual void handleFirmwareUpdateConfirmation(NodeID node_id,
const protocol::file::BeginFirmwareUpdate::Response& response)
{
(void)node_id;
(void)response;
}
virtual ~IFirmwareVersionChecker() { }
};
/**
* This class subscribes to updates from @ref NodeInfoRetriever in order to detect nodes that need firmware
* updates. The decision process of whether a firmware update is needed is relayed to the application via
* @ref IFirmwareVersionChecker. If the application confirms that the update is needed, this class will begin
* sending uavcan.protocol.file.BeginFirmwareUpdate periodically (period is configurable) to every node that
* needs an update in a round-robin fashion. There are the following termination conditions for the periodical
* sending process:
*
* - The node responds with confirmation. In this case the class will forget about the node on the assumption
* that its job is done here. Confirmation will be reported to the application via the interface.
*
* - The node responds with an error, and the error code is not ERROR_IN_PROGRESS. In this case the class will
* request the application via the interface mentioned above about its further actions - either give up or
* retry, possibly with a different firmware.
*
* - The node responds with error ERROR_IN_PROGRESS. In this case the class behaves exactly in the same way as if
 * response was successful (because the firmware is already being updated, so the goal is fulfilled).
* Confirmation will be reported to the application via the interface.
*
* - The node goes offline or restarts. In this case the node will be immediately forgotten, and the process
* will repeat again later because the node info retriever re-queries GetNodeInfo every time when a node restarts.
*
* Since the target node (i.e. node that is being updated) will try to retrieve the specified firmware file using
* the file services (uavcan.protocol.file.*), the provided firmware path must be reachable for the file server
* (@ref FileServer, @ref BasicFileServer). Normally, an application that serves as UAVCAN firmware update server
* will include at least the following components:
* - this firmware update trigger;
* - dynamic node ID allocation server;
* - file server.
*
* Implementation details: the class uses memory pool to keep the list of nodes that have not responded yet, which
* limits the maximum length of the path to the firmware file, which is covered in @ref IFirmwareVersionChecker.
* To somewhat relieve the maximum path length limitation, the class can be supplied with a common prefix that
 * will be prepended to firmware paths before sending requests.
* Interval at which requests are being sent is configurable, but the default value should cover the needs of
* virtually all use cases (as always).
*/
class FirmwareUpdateTrigger : public INodeInfoListener,
private TimerBase
{
typedef MethodBinder<FirmwareUpdateTrigger*,
void (FirmwareUpdateTrigger::*)(const ServiceCallResult<protocol::file::BeginFirmwareUpdate>&)>
BeginFirmwareUpdateResponseCallback;
typedef IFirmwareVersionChecker::FirmwareFilePath FirmwareFilePath;
enum { DefaultRequestIntervalMs = 1000 }; ///< Shall not be less than default service response timeout.
// Map<> visitor: finds the lowest pending node ID that is strictly greater than the node queried last
// and has no service call in flight. Always returns false so the whole map is traversed.
struct NextNodeIDSearchPredicate : ::uavcan::Noncopyable
{
enum { DefaultOutput = 0xFFU }; ///< Sentinel: no suitable node was found.
const FirmwareUpdateTrigger& owner;
uint8_t output;
NextNodeIDSearchPredicate(const FirmwareUpdateTrigger& arg_owner)
: owner(arg_owner)
, output(DefaultOutput)
{ }
bool operator()(const NodeID node_id, const FirmwareFilePath&)
{
if (node_id.get() > owner.last_queried_node_id_ &&
!owner.begin_fw_update_client_.hasPendingCallToServer(node_id))
{
// Track the smallest qualifying ID seen so far.
output = min(output, node_id.get());
}
return false;
}
};
/*
 * State
 */
ServiceClient<protocol::file::BeginFirmwareUpdate, BeginFirmwareUpdateResponseCallback> begin_fw_update_client_;
IFirmwareVersionChecker& checker_;
NodeInfoRetriever* node_info_retriever_;
Map<NodeID, FirmwareFilePath> pending_nodes_;       ///< Nodes awaiting a successful BeginFirmwareUpdate, keyed by ID.
MonotonicDuration request_interval_;
FirmwareFilePath common_path_prefix_;
mutable uint8_t last_queried_node_id_;              ///< Round-robin cursor; mutable because pickNextNodeID() is const.
/*
 * Methods of INodeInfoListener
 */
virtual void handleNodeInfoUnavailable(NodeID node_id)
{
UAVCAN_TRACE("FirmwareUpdateTrigger", "Node ID %d could not provide GetNodeInfo response", int(node_id.get()));
pending_nodes_.remove(node_id); // For extra paranoia
}
// Asks the application whether this node needs an update; schedules or forgets it accordingly.
virtual void handleNodeInfoRetrieved(const NodeID node_id, const protocol::GetNodeInfo::Response& node_info)
{
FirmwareFilePath firmware_file_path;
const bool update_needed = checker_.shouldRequestFirmwareUpdate(node_id, node_info, firmware_file_path);
if (update_needed)
{
UAVCAN_TRACE("FirmwareUpdateTrigger", "Node ID %d requires update; file path: %s",
int(node_id.get()), firmware_file_path.c_str());
trySetPendingNode(node_id, firmware_file_path);
}
else
{
UAVCAN_TRACE("FirmwareUpdateTrigger", "Node ID %d does not need update", int(node_id.get()));
pending_nodes_.remove(node_id);
}
}
// Offline nodes are dropped immediately; they are re-evaluated when they reappear,
// because NodeInfoRetriever re-queries GetNodeInfo on restart.
virtual void handleNodeStatusChange(const NodeStatusMonitor::NodeStatusChangeEvent& event)
{
if (event.status.mode == protocol::NodeStatus::MODE_OFFLINE)
{
pending_nodes_.remove(event.node_id);
UAVCAN_TRACE("FirmwareUpdateTrigger", "Node ID %d is offline hence forgotten", int(event.node_id.get()));
}
}
/*
 * Own methods
 */
INode& getNode() { return begin_fw_update_client_.getNode(); }
// Inserts (node, path) into the pending map and lazily starts the periodic timer.
// An insertion failure means the memory pool is exhausted (Map<> allocates from it).
void trySetPendingNode(const NodeID node_id, const FirmwareFilePath& path)
{
if (UAVCAN_NULLPTR != pending_nodes_.insert(node_id, path))
{
if (!TimerBase::isRunning())
{
TimerBase::startPeriodic(request_interval_);
UAVCAN_TRACE("FirmwareUpdateTrigger", "Timer started");
}
}
else
{
getNode().registerInternalFailure("FirmwareUpdateTrigger OOM");
}
}
// Round-robin selection of the next node to query. Returns 0 (an invalid/broadcast NodeID)
// when no node qualifies; the caller checks isUnicast() before using the result.
NodeID pickNextNodeID() const
{
// We can't do index search because indices are unstable in Map<>
// First try - from the current node up
NextNodeIDSearchPredicate s1(*this);
(void)pending_nodes_.find<NextNodeIDSearchPredicate&>(s1);
if (s1.output != NextNodeIDSearchPredicate::DefaultOutput)
{
last_queried_node_id_ = s1.output;
}
else if (last_queried_node_id_ != 0)
{
// Nothing was found, resetting the selector and trying again
UAVCAN_TRACE("FirmwareUpdateTrigger", "Node selector reset, last value: %d", int(last_queried_node_id_));
last_queried_node_id_ = 0;
NextNodeIDSearchPredicate s2(*this);
(void)pending_nodes_.find<NextNodeIDSearchPredicate&>(s2);
if (s2.output != NextNodeIDSearchPredicate::DefaultOutput)
{
last_queried_node_id_ = s2.output;
}
}
else
{
; // Hopeless
}
UAVCAN_TRACE("FirmwareUpdateTrigger", "Next node ID to query: %d, pending nodes: %u, pending calls: %u",
int(last_queried_node_id_), pending_nodes_.getSize(),
begin_fw_update_client_.getNumPendingCalls());
return last_queried_node_id_;
}
// Termination logic for the per-node request loop: confirmation (OK or IN_PROGRESS) forgets the
// node; any other error delegates the retry decision to the application.
void handleBeginFirmwareUpdateResponse(const ServiceCallResult<protocol::file::BeginFirmwareUpdate>& result)
{
if (!result.isSuccessful())
{
UAVCAN_TRACE("FirmwareUpdateTrigger", "Request to %d has timed out, will retry",
int(result.getCallID().server_node_id.get()));
return;
}
FirmwareFilePath* const old_path = pending_nodes_.access(result.getCallID().server_node_id);
if (old_path == UAVCAN_NULLPTR)
{
// The entry has been removed, assuming that it's not needed anymore
return;
}
const bool confirmed =
result.getResponse().error == protocol::file::BeginFirmwareUpdate::Response::ERROR_OK ||
result.getResponse().error == protocol::file::BeginFirmwareUpdate::Response::ERROR_IN_PROGRESS;
if (confirmed)
{
UAVCAN_TRACE("FirmwareUpdateTrigger", "Node %d confirmed the update request",
int(result.getCallID().server_node_id.get()));
pending_nodes_.remove(result.getCallID().server_node_id);
checker_.handleFirmwareUpdateConfirmation(result.getCallID().server_node_id, result.getResponse());
}
else
{
UAVCAN_ASSERT(old_path != UAVCAN_NULLPTR);
UAVCAN_ASSERT(TimerBase::isRunning());
// We won't have to call trySetPendingNode(), because we'll directly update the old path via the pointer
const bool update_needed =
checker_.shouldRetryFirmwareUpdate(result.getCallID().server_node_id, result.getResponse(), *old_path);
if (!update_needed)
{
UAVCAN_TRACE("FirmwareUpdateTrigger", "Node %d does not need retry",
int(result.getCallID().server_node_id.get()));
pending_nodes_.remove(result.getCallID().server_node_id);
}
}
}
// Periodic tick: sends one BeginFirmwareUpdate request to the next pending node;
// stops itself when the pending map is empty.
virtual void handleTimerEvent(const TimerEvent&)
{
if (pending_nodes_.isEmpty())
{
TimerBase::stop();
UAVCAN_TRACE("FirmwareUpdateTrigger", "Timer stopped");
return;
}
const NodeID node_id = pickNextNodeID();
if (!node_id.isUnicast())
{
return;
}
FirmwareFilePath* const path = pending_nodes_.access(node_id);
if (path == UAVCAN_NULLPTR)
{
UAVCAN_ASSERT(0); // pickNextNodeID() returned a node ID that is not present in the map
return;
}
protocol::file::BeginFirmwareUpdate::Request req;
req.source_node_id = getNode().getNodeID().get();
// NOTE(review): common_path_prefix_ is initialized in start() but is NOT prepended to the path
// here (upstream libuavcan prepends it) - confirm this is intended in this fork.
req.image_file_remote_path.path = path->c_str();
UAVCAN_TRACE("FirmwareUpdateTrigger", "Request to %d with path: %s",
int(node_id.get()), req.image_file_remote_path.path.c_str());
const int call_res = begin_fw_update_client_.call(node_id, req);
if (call_res < 0)
{
getNode().registerInternalFailure("FirmwareUpdateTrigger call");
}
}
public:
FirmwareUpdateTrigger(INode& node, IFirmwareVersionChecker& checker)
: TimerBase(node)
, begin_fw_update_client_(node)
, checker_(checker)
, node_info_retriever_(UAVCAN_NULLPTR)
, pending_nodes_(node.getAllocator())
, request_interval_(MonotonicDuration::fromMSec(DefaultRequestIntervalMs))
, last_queried_node_id_(0)
{ }
~FirmwareUpdateTrigger()
{
if (node_info_retriever_ != UAVCAN_NULLPTR)
{
node_info_retriever_->removeListener(this);
}
}
/**
 * Starts the object. Once started, it can't be stopped unless destroyed.
 *
 * @param node_info_retriever       The object will register itself against this retriever.
 *                                  When the destructor is called, the object will unregister itself.
 *
 * @param common_path_prefix        If set, this path will be prefixed to all firmware paths provided by the
 *                                  application interface. The prefix does not need to end with path separator;
 *                                  the last trailing one will be removed (so use '//' if you need '/').
 *                                  By default the prefix is empty.
 *
 * NOTE(review): arg_alt_path_prefix is accepted but not used anywhere in this method or class -
 * confirm whether the alternative prefix is intended to be wired up.
 *
 * @return Negative error code.
 */
int start(NodeInfoRetriever& node_info_retriever,
const FirmwareFilePath& arg_common_path_prefix = FirmwareFilePath(),
const FirmwareFilePath& arg_alt_path_prefix = FirmwareFilePath(),
const TransferPriority priority = TransferPriority::OneHigherThanLowest)
{
/*
 * Configuring the node info retriever
 */
node_info_retriever_ = &node_info_retriever;
int res = node_info_retriever_->addListener(this);
if (res < 0)
{
return res;
}
/*
 * Initializing the prefix, removing only the last trailing path separator.
 */
common_path_prefix_ = arg_common_path_prefix;
if (!common_path_prefix_.empty() &&
*(common_path_prefix_.end() - 1) == protocol::file::Path::SEPARATOR)
{
common_path_prefix_.resize(uint8_t(common_path_prefix_.size() - 1U));
}
/*
 * Initializing the client
 */
res = begin_fw_update_client_.init(priority);
if (res < 0)
{
return res;
}
begin_fw_update_client_.setCallback(
BeginFirmwareUpdateResponseCallback(this, &FirmwareUpdateTrigger::handleBeginFirmwareUpdateResponse));
// The timer will be started ad-hoc
return 0;
}
/**
 * Interval at which uavcan.protocol.file.BeginFirmwareUpdate requests are being sent.
 * Note that default value should be OK for any use case.
 */
MonotonicDuration getRequestInterval() const { return request_interval_; }
void setRequestInterval(const MonotonicDuration interval)
{
if (interval.isPositive())
{
request_interval_ = interval;
if (TimerBase::isRunning()) // Restarting with new interval
{
TimerBase::startPeriodic(request_interval_);
}
}
else
{
UAVCAN_ASSERT(0);
}
}
/**
 * This method is mostly needed for testing.
 * When triggering is not in progress, the class consumes zero CPU time.
 */
bool isTimerRunning() const { return TimerBase::isRunning(); }
unsigned getNumPendingNodes() const
{
const unsigned ret = pending_nodes_.getSize();
UAVCAN_ASSERT((ret > 0) ? isTimerRunning() : true);
return ret;
}
};
}
#endif // Include guard
@@ -0,0 +1,262 @@
/*
* Copyright (C) 2014 Pavel Kirienko <pavel.kirienko@gmail.com>
*/
#ifndef UAVCAN_PROTOCOL_GLOBAL_TIME_SYNC_MASTER_HPP_INCLUDED
#define UAVCAN_PROTOCOL_GLOBAL_TIME_SYNC_MASTER_HPP_INCLUDED
#include <uavcan/build_config.hpp>
#include <uavcan/node/publisher.hpp>
#include <uavcan/node/subscriber.hpp>
#include <uavcan/util/method_binder.hpp>
#include <uavcan/util/lazy_constructor.hpp>
#include <uavcan/protocol/GlobalTimeSync.hpp>
#include <uavcan/debug.hpp>
#include <cstdlib>
#include <cassert>
namespace uavcan
{
/**
* Please read the specs to learn how the time synchronization works.
*
* No more than one object of this class is allowed per node; otherwise a disaster is bound to happen.
*
* NOTE: In order for this class to work, the platform driver must implement
* CAN bus TX loopback with both UTC and monotonic timestamping.
*
* Ref. M. Gergeleit, H. Streich - "Implementing a Distributed High-Resolution Real-Time Clock using the CAN-Bus"
*
* TODO: Enforce max one master per node
*/
class UAVCAN_EXPORT GlobalTimeSyncMaster : protected LoopbackFrameListenerBase
{
// Per-interface publisher: each CAN interface carries its own sync stream because
// the TX loopback timestamp is specific to the interface the frame went out on.
class IfaceMaster
{
Publisher<protocol::GlobalTimeSync> pub_;
MonotonicTime iface_prev_pub_mono_;     ///< Monotonic time of the previous publication on this iface.
UtcTime prev_tx_utc_;                   ///< Loopback UTC timestamp of the previous sync frame; zero if none/consumed.
const uint8_t iface_index_;
public:
IfaceMaster(INode& node, uint8_t iface_index)
: pub_(node)
, iface_index_(iface_index)
{
UAVCAN_ASSERT(iface_index < MaxCanIfaces);
}
// Initializes the publisher, restricting it to this interface with TX loopback enabled.
// Returns negative error code on failure.
int init(TransferPriority priority)
{
const int res = pub_.init(priority);
if (res >= 0)
{
pub_.getTransferSender().setIfaceMask(uint8_t(1 << iface_index_));
pub_.getTransferSender().setCanIOFlags(CanIOFlagLoopback);
}
return res;
}
// Records the hardware UTC timestamp of the just-transmitted sync frame (from loopback);
// it is reported in the next message's previous_transmission_timestamp_usec field.
void setTxTimestamp(UtcTime ts)
{
if (ts.isZero())
{
UAVCAN_ASSERT(0);
pub_.getNode().registerInternalFailure("GlobalTimeSyncMaster zero TX ts");
return;
}
// A non-zero value here means the previous timestamp was never consumed by publish() -
// two loopbacks arrived per publication, which indicates a driver fault.
if (!prev_tx_utc_.isZero())
{
prev_tx_utc_ = UtcTime(); // Reset again, because there's something broken in the driver and we don't trust it
pub_.getNode().registerInternalFailure("GlobalTimeSyncMaster pub conflict");
return;
}
prev_tx_utc_ = ts;
}
// Publishes one sync message carrying the previous TX timestamp, then resets it.
// If too much time has elapsed since the last publication, zero is sent instead so that
// slaves do not adjust from a stale timestamp. Returns the publisher's result code.
int publish(TransferID tid, MonotonicTime current_time)
{
UAVCAN_ASSERT(pub_.getTransferSender().getCanIOFlags() == CanIOFlagLoopback);
UAVCAN_ASSERT(pub_.getTransferSender().getIfaceMask() == (1 << iface_index_));
const MonotonicDuration since_prev_pub = current_time - iface_prev_pub_mono_;
iface_prev_pub_mono_ = current_time;
UAVCAN_ASSERT(since_prev_pub.isPositive());
const bool long_period = since_prev_pub.toMSec() >= protocol::GlobalTimeSync::MAX_BROADCASTING_PERIOD_MS;
protocol::GlobalTimeSync msg;
msg.previous_transmission_timestamp_usec = long_period ? 0 : prev_tx_utc_.toUSec();
prev_tx_utc_ = UtcTime();
UAVCAN_TRACE("GlobalTimeSyncMaster", "Publishing %llu iface=%i tid=%i",
static_cast<unsigned long long>(msg.previous_transmission_timestamp_usec),
int(iface_index_), int(tid.get()));
return pub_.broadcast(msg, tid);
}
};
INode& node_;
LazyConstructor<IfaceMaster> iface_masters_[MaxCanIfaces];
MonotonicTime prev_pub_mono_;
DataTypeID dtid_;           ///< Cached GlobalTimeSync data type ID, resolved in init().
bool initialized_;
// Routes loopback frames of our own single-frame GlobalTimeSync broadcasts to the
// corresponding interface master so it can capture the hardware TX timestamp.
virtual void handleLoopbackFrame(const RxFrame& frame)
{
const uint8_t iface = frame.getIfaceIndex();
if (initialized_ && iface < MaxCanIfaces)
{
if (frame.getDataTypeID() == dtid_ &&
frame.getTransferType() == TransferTypeMessageBroadcast &&
frame.isStartOfTransfer() && frame.isEndOfTransfer() &&
frame.getSrcNodeID() == node_.getNodeID())
{
iface_masters_[iface]->setTxTimestamp(frame.getUtcTimestamp());
}
}
else
{
UAVCAN_ASSERT(0);
}
}
// Obtains a transfer ID shared by all interface masters from the outgoing transfer
// registry, then increments the stored value. Returns 0 on success, -ErrMemory on OOM.
int getNextTransferID(TransferID& tid)
{
const MonotonicDuration max_transfer_interval =
MonotonicDuration::fromMSec(protocol::GlobalTimeSync::MAX_BROADCASTING_PERIOD_MS);
const OutgoingTransferRegistryKey otr_key(dtid_, TransferTypeMessageBroadcast, NodeID::Broadcast);
const MonotonicTime otr_deadline = node_.getMonotonicTime() + max_transfer_interval;
TransferID* const tid_ptr =
node_.getDispatcher().getOutgoingTransferRegistry().accessOrCreate(otr_key, otr_deadline);
if (tid_ptr == UAVCAN_NULLPTR)
{
return -ErrMemory;
}
tid = *tid_ptr;
tid_ptr->increment();
return 0;
}
public:
explicit GlobalTimeSyncMaster(INode& node)
: LoopbackFrameListenerBase(node.getDispatcher())
, node_(node)
, initialized_(false)
{ }
/**
 * Merely prepares the class to work, doesn't do anything else.
 * Must be called before the master can be used.
 * Returns negative error code.
 */
int init(const TransferPriority priority = TransferPriority::OneLowerThanHighest)
{
if (initialized_)
{
return 0;
}
// Data type ID
const DataTypeDescriptor* const desc =
GlobalDataTypeRegistry::instance().find(DataTypeKindMessage, protocol::GlobalTimeSync::getDataTypeFullName());
if (desc == UAVCAN_NULLPTR)
{
return -ErrUnknownDataType;
}
dtid_ = desc->getID();
// Iface master array
int res = -ErrLogic;
for (uint8_t i = 0; i < MaxCanIfaces; i++)
{
if (!iface_masters_[i].isConstructed())
{
iface_masters_[i].construct<INode&, uint8_t>(node_, i);
}
res = iface_masters_[i]->init(priority);
if (res < 0)
{
break;
}
}
// Loopback listener
initialized_ = res >= 0;
if (initialized_)
{
LoopbackFrameListenerBase::startListening();
}
return res;
}
/**
 * Whether the master instance has been initialized.
 */
bool isInitialized() const { return initialized_; }
/**
 * Publishes one sync message.
 *
 * Every call to this method hints the master to publish the next sync message once. Exact time will
 * be obtained from the TX loopback timestamp field.
 *
 * This method shall be called with a proper interval - refer to the time sync message definition
 * for min/max interval values.
 *
 * Returns negative error code.
 */
int publish()
{
// Lazy initialization in case the application never called init() explicitly.
if (!initialized_)
{
const int res = init();
if (res < 0)
{
return res;
}
}
/*
 * Enforce max frequency
 */
const MonotonicTime current_time = node_.getMonotonicTime();
{
const MonotonicDuration since_prev_pub = current_time - prev_pub_mono_;
UAVCAN_ASSERT(since_prev_pub.isPositive());
if (since_prev_pub.toMSec() < protocol::GlobalTimeSync::MIN_BROADCASTING_PERIOD_MS)
{
UAVCAN_TRACE("GlobalTimeSyncMaster", "Publication skipped");
return 0;
}
prev_pub_mono_ = current_time;
}
/*
 * Obtain common Transfer ID for all masters
 */
TransferID tid;
{
const int tid_res = getNextTransferID(tid);
if (tid_res < 0)
{
return tid_res;
}
}
// Publish on every available interface; abort on the first failure.
for (uint8_t i = 0; i < node_.getDispatcher().getCanIOManager().getNumIfaces(); i++)
{
const int res = iface_masters_[i]->publish(tid, current_time);
if (res < 0)
{
return res;
}
}
return 0;
}
};
}
#endif // UAVCAN_PROTOCOL_GLOBAL_TIME_SYNC_MASTER_HPP_INCLUDED
@@ -0,0 +1,198 @@
/*
* Copyright (C) 2014 Pavel Kirienko <pavel.kirienko@gmail.com>
*/
#ifndef UAVCAN_PROTOCOL_GLOBAL_TIME_SYNC_SLAVE_HPP_INCLUDED
#define UAVCAN_PROTOCOL_GLOBAL_TIME_SYNC_SLAVE_HPP_INCLUDED
#include <uavcan/node/subscriber.hpp>
#include <uavcan/util/method_binder.hpp>
#include <uavcan/protocol/GlobalTimeSync.hpp>
#include <uavcan/debug.hpp>
#include <cassert>
namespace uavcan
{
/**
* Please read the specs to learn how the time synchronization works.
*
* No more than one object of this class is allowed per node; otherwise a disaster is bound to happen.
*
* NOTE: In order for this class to work, the platform driver must implement:
* - CAN bus RX UTC timestamping;
* - Clock adjustment method in the system clock interface @ref ISystemClock::adjustUtc().
*
* Ref. M. Gergeleit, H. Streich - "Implementing a Distributed High-Resolution Real-Time Clock using the CAN-Bus"
* http://modecs.cs.uni-salzburg.at/results/related_documents/CAN_clock.pdf
*/
class UAVCAN_EXPORT GlobalTimeSyncSlave : Noncopyable
{
typedef MethodBinder<GlobalTimeSyncSlave*,
void (GlobalTimeSyncSlave::*)(const ReceivedDataStructure<protocol::GlobalTimeSync>&)>
GlobalTimeSyncCallback;
Subscriber<protocol::GlobalTimeSync, GlobalTimeSyncCallback> sub_;
UtcTime prev_ts_utc_;
MonotonicTime prev_ts_mono_;
MonotonicTime last_adjustment_ts_;
enum State { Update, Adjust } state_;
NodeID master_nid_;
TransferID prev_tid_;
uint8_t prev_iface_index_;
bool suppressed_;
ISystemClock& getSystemClock() const { return sub_.getNode().getSystemClock(); }
void adjustFromMsg(const ReceivedDataStructure<protocol::GlobalTimeSync>& msg)
{
UAVCAN_ASSERT(msg.previous_transmission_timestamp_usec > 0);
const UtcDuration adjustment = UtcTime::fromUSec(msg.previous_transmission_timestamp_usec) - prev_ts_utc_;
UAVCAN_TRACE("GlobalTimeSyncSlave", "Adjustment: usec=%lli snid=%i iface=%i suppress=%i",
static_cast<long long>(adjustment.toUSec()),
int(msg.getSrcNodeID().get()), int(msg.getIfaceIndex()), int(suppressed_));
if (!suppressed_)
{
getSystemClock().adjustUtc(adjustment);
}
last_adjustment_ts_ = msg.getMonotonicTimestamp();
state_ = Update;
}
void updateFromMsg(const ReceivedDataStructure<protocol::GlobalTimeSync>& msg)
{
UAVCAN_TRACE("GlobalTimeSyncSlave", "Update: snid=%i iface=%i",
int(msg.getSrcNodeID().get()), int(msg.getIfaceIndex()));
prev_ts_utc_ = msg.getUtcTimestamp();
prev_ts_mono_ = msg.getMonotonicTimestamp();
master_nid_ = msg.getSrcNodeID();
prev_iface_index_ = msg.getIfaceIndex();
prev_tid_ = msg.getTransferID();
state_ = Adjust;
}
// Core of the slave's master-selection / adjustment state machine:
//  - (re)locks onto a sender when uninitialized, when the current master has
//    timed out, or when a sender with a lower node ID (higher priority) appears;
//  - otherwise, for messages from the locked master arriving on the same iface,
//    either applies an adjustment (state Adjust, provided the message pair is
//    consecutive, valid and timely) or records a new reference (state Update);
//  - messages from any other node/iface combination are ignored.
void processMsg(const ReceivedDataStructure<protocol::GlobalTimeSync>& msg)
{
const MonotonicDuration since_prev_msg = msg.getMonotonicTimestamp() - prev_ts_mono_;
UAVCAN_ASSERT(!since_prev_msg.isNegative());
const bool needs_init = !master_nid_.isValid() || prev_ts_mono_.isZero();
// Lower node ID wins master arbitration
const bool switch_master = msg.getSrcNodeID() < master_nid_;
// TODO: Make configurable
const bool pub_timeout = since_prev_msg.toMSec() > protocol::GlobalTimeSync::RECOMMENDED_BROADCASTER_TIMEOUT_MS;
if (switch_master || pub_timeout || needs_init)
{
UAVCAN_TRACE("GlobalTimeSyncSlave", "Force update: needs_init=%i switch_master=%i pub_timeout=%i",
int(needs_init), int(switch_master), int(pub_timeout));
updateFromMsg(msg);
}
else if (msg.getIfaceIndex() == prev_iface_index_ && msg.getSrcNodeID() == master_nid_)
{
if (state_ == Adjust)
{
// An adjustment is only trustworthy if the message pair is consecutive
// (transfer IDs differ by exactly 1), carries a nonzero previous-TX
// timestamp, and arrived within the max broadcasting period.
const bool msg_invalid = msg.previous_transmission_timestamp_usec == 0;
const bool wrong_tid = prev_tid_.computeForwardDistance(msg.getTransferID()) != 1;
const bool wrong_timing = since_prev_msg.toMSec() > protocol::GlobalTimeSync::MAX_BROADCASTING_PERIOD_MS;
if (msg_invalid || wrong_tid || wrong_timing)
{
UAVCAN_TRACE("GlobalTimeSyncSlave",
"Adjustment skipped: msg_invalid=%i wrong_tid=%i wrong_timing=%i",
int(msg_invalid), int(wrong_tid), int(wrong_timing));
state_ = Update;   // Demote to Update: re-establish a reference instead of adjusting
}
}
if (state_ == Adjust)
{
adjustFromMsg(msg);
}
else
{
updateFromMsg(msg);
}
}
else
{
UAVCAN_TRACE("GlobalTimeSyncSlave", "Ignored: snid=%i iface=%i",
int(msg.getSrcNodeID().get()), int(msg.getIfaceIndex()));
}
}
// Subscriber callback. Only broadcast transfers are meaningful for time sync;
// anything else is traced and dropped without touching the state machine.
void handleGlobalTimeSync(const ReceivedDataStructure<protocol::GlobalTimeSync>& msg)
{
if (msg.getTransferType() != TransferTypeMessageBroadcast)
{
UAVCAN_TRACE("GlobalTimeSyncSlave", "Invalid transfer type %i", int(msg.getTransferType()));
return;
}
processMsg(msg);
}
public:
// Constructs an inactive slave; call start() to begin receiving sync messages.
explicit GlobalTimeSyncSlave(INode& node)
: sub_(node)
, state_(Update)
, prev_iface_index_(0xFF)   // Sentinel: no iface seen yet
, suppressed_(false)
{ }
/**
* Starts the time sync slave. Once started, it works on its own and does not require any
* attention from the application, other than to handle a clock adjustment request occasionally.
* Returns negative error code.
*/
int start()
{
// Subscribes to GlobalTimeSync; all further work happens in the receive callback.
return sub_.start(GlobalTimeSyncCallback(this, &GlobalTimeSyncSlave::handleGlobalTimeSync));
}
/**
* Enable or disable the suppressed mode.
*
* In suppressed mode the slave will continue tracking time sync masters in the network, but will not
* perform local clock adjustments. So it's kind of a dry run - all the time sync logic works except
* the local clock will not receive adjustments.
*
* Suppressed mode is useful for nodes that can act as a back-up clock sync masters - as long as the
* node sees a higher priority time sync master in the network, its slave will be NOT suppressed
* in order to sync the local clock with the global master. As soon as all other higher priority
* masters go down, the local node will suppress its time sync slave instance and become a new master.
*
* Suppressed mode is disabled by default.
*/
void suppress(bool suppressed) { suppressed_ = suppressed; }   // See suppressed-mode description above
bool isSuppressed() const { return suppressed_; }
/**
* If the clock sync slave sees any clock sync masters in the network, it is ACTIVE.
* When the last master times out (PUBLISHER_TIMEOUT), the slave will be INACTIVE.
* Note that immediately after start up the slave will be INACTIVE until it finds a master.
* Please read the specs to learn more.
*/
bool isActive() const
{
// Active iff at least one adjustment event occurred (even a suppressed one)
// and it happened within the recommended broadcaster timeout.
const MonotonicDuration since_prev_adj = getSystemClock().getMonotonic() - last_adjustment_ts_;
return !last_adjustment_ts_.isZero() &&
(since_prev_adj.toMSec() <= protocol::GlobalTimeSync::RECOMMENDED_BROADCASTER_TIMEOUT_MS);
}
/**
* Node ID of the master the slave is currently locked on.
* Returns an invalid Node ID if there's no active master.
*/
NodeID getMasterNodeID() const { return isActive() ? master_nid_ : NodeID(); }   // Default-constructed NodeID is invalid
/**
* Last time when the local clock adjustment was performed.
*/
MonotonicTime getLastAdjustmentTime() const { return last_adjustment_ts_; }   // Zero if no adjustment has ever occurred
};
}
#endif // UAVCAN_PROTOCOL_GLOBAL_TIME_SYNC_SLAVE_HPP_INCLUDED
@@ -0,0 +1,286 @@
/*
* Copyright (C) 2014 Pavel Kirienko <pavel.kirienko@gmail.com>
*/
#ifndef UAVCAN_PROTOCOL_LOGGER_HPP_INCLUDED
#define UAVCAN_PROTOCOL_LOGGER_HPP_INCLUDED
#include <uavcan/time.hpp>
#include <uavcan/build_config.hpp>
#include <uavcan/protocol/debug/LogMessage.hpp>
#include <uavcan/marshal/char_array_formatter.hpp>
#include <uavcan/node/publisher.hpp>
#include <cstdlib>
#if !defined(UAVCAN_CPP_VERSION) || !defined(UAVCAN_CPP11)
# error UAVCAN_CPP_VERSION
#endif
namespace uavcan
{
/**
* External log sink interface.
* External log sink allows the application to install a hook on the logger output.
* This can be used for application-wide logging.
* Please refer to the @ref Logger class docs.
*/
class UAVCAN_EXPORT ILogSink
{
public:
// Severity level type, derived from the DSDL-generated LogLevel field type.
typedef typename StorageType<typename protocol::debug::LogLevel::FieldTypes::value>::Type LogLevel;
virtual ~ILogSink() { }
/**
 * Logger will not sink messages with a severity level lower than returned by this method.
 * Default level is DEBUG.
 */
virtual LogLevel getLogLevel() const { return protocol::debug::LogLevel::DEBUG; }
/**
 * Logger will call this method for every log message which severity level
 * is not less than the current level of this sink.
 */
virtual void log(const protocol::debug::LogMessage& message) = 0;
};
/**
* Node logging convenience class.
*
* This class is based on the standard UAVCAN message type for logging - uavcan.protocol.debug.LogMessage.
*
* Provides logging methods of different severity; implements two sinks for the log messages:
* - Broadcast via the UAVCAN bus;
* - Sink into the application via @ref ILogSink.
*
* For each sink an individual severity threshold filter can be configured.
*/
class UAVCAN_EXPORT Logger
{
public:
typedef ILogSink::LogLevel LogLevel;
/**
 * This value is higher than any valid severity value.
 * Use it to completely suppress the output.
 */
static LogLevel getLogLevelAboveAll() { return (1U << protocol::debug::LogLevel::FieldTypes::value::BitLen) - 1U; }
private:
enum { DefaultTxTimeoutMs = 2000 };
Publisher<protocol::debug::LogMessage> logmsg_pub_;   // Broadcast sink
protocol::debug::LogMessage msg_buf_;                 // Reused buffer for the formatting helpers
LogLevel level_;                                      // Broadcast severity threshold
ILogSink* external_sink_;                             // Optional application hook; may be null
// Returns the external sink's threshold, or a level above all valid
// severities when no sink is installed (so nothing passes the filter).
LogLevel getExternalSinkLevel() const
{
return (external_sink_ == UAVCAN_NULLPTR) ? getLogLevelAboveAll() : external_sink_->getLogLevel();
}
public:
// Constructs with broadcast threshold ERROR and a 2-second TX timeout.
explicit Logger(INode& node)
: logmsg_pub_(node)
, external_sink_(UAVCAN_NULLPTR)
{
level_ = protocol::debug::LogLevel::ERROR;
setTxTimeout(MonotonicDuration::fromMSec(DefaultTxTimeoutMs));
UAVCAN_ASSERT(getTxTimeout() == MonotonicDuration::fromMSec(DefaultTxTimeoutMs));
}
/**
 * Initializes the logger, does not perform any network activity.
 * Must be called once before use.
 * Returns negative error code.
 */
int init(const TransferPriority priority = TransferPriority::Lowest)
{
const int res = logmsg_pub_.init(priority);
if (res < 0)
{
return res;
}
return 0;
}
/**
 * Logs one message. Please consider using helper methods instead of this one.
 *
 * The message will be broadcasted via the UAVCAN bus if the severity level of the
 * message is >= severity level of the logger.
 *
 * The message will be reported into the external log sink if the external sink is
 * installed and the severity level of the message is >= severity level of the external sink.
 *
 * Returns negative error code.
 */
int log(const protocol::debug::LogMessage& message)
{
int retval = 0;
// NOTE(review): if message.level.value equals getLogLevelAboveAll() (the
// maximum representable value), this comparison holds even with no sink
// installed and external_sink_ would be dereferenced null. Valid severity
// constants are presumably below that value — confirm upstream.
if (message.level.value >= getExternalSinkLevel())
{
external_sink_->log(message);
}
if (message.level.value >= level_)
{
retval = logmsg_pub_.broadcast(message);
}
return retval;
}
/**
 * Severity filter for UAVCAN broadcasting.
 * Log message will be broadcasted via the UAVCAN network only if its severity is >= getLevel().
 * This does not affect the external sink.
 * Default level is ERROR.
 */
LogLevel getLevel() const { return level_; }
void setLevel(LogLevel level) { level_ = level; }
/**
 * External log sink allows the application to install a hook on the logger output.
 * This can be used for application-wide logging.
 * Null pointer means that there's no log sink (can be used to remove it).
 * By default there's no log sink.
 */
ILogSink* getExternalSink() const { return external_sink_; }
void setExternalSink(ILogSink* sink) { external_sink_ = sink; }
/**
 * Log message broadcast transmission timeout.
 * The default value should be acceptable for any use case.
 */
MonotonicDuration getTxTimeout() const { return logmsg_pub_.getTxTimeout(); }
void setTxTimeout(MonotonicDuration val) { logmsg_pub_.setTxTimeout(val); }
/**
 * Helper methods for various severity levels and with formatting support.
 * These methods build a formatted log message and pass it into the method @ref log().
 *
 * Format string usage is a bit unusual: use "%*" for any argument type, use "%%" to print a percent character.
 * No other formating options are supported. Insufficient/extra arguments are ignored.
 *
 * Example format string:
 *     "What do you get if you %* %* by %*? %*. Extra arguments: %* %* %%"
 * ...with the following arguments:
 *     "multiply", 6, 9.0F 4.2e1
 * ...will likely produce this (floating point representation is platform dependent):
 *     "What do you get if you multiply 6 by 9.000000? 42.000000. Extra arguments: %* %* %"
 *
 * Formatting is not supported in C++03 mode.
 * @{
 */
#if UAVCAN_CPP_VERSION >= UAVCAN_CPP11
// Variadic (formatting) flavor — definition follows the class (see below).
template <typename... Args>
int log(LogLevel level, const char* source, const char* format, Args... args) UAVCAN_NOEXCEPT;
template <typename... Args>
inline int logDebug(const char* source, const char* format, Args... args) UAVCAN_NOEXCEPT
{
return log(protocol::debug::LogLevel::DEBUG, source, format, args...);
}
template <typename... Args>
inline int logInfo(const char* source, const char* format, Args... args) UAVCAN_NOEXCEPT
{
return log(protocol::debug::LogLevel::INFO, source, format, args...);
}
template <typename... Args>
inline int logWarning(const char* source, const char* format, Args... args) UAVCAN_NOEXCEPT
{
return log(protocol::debug::LogLevel::WARNING, source, format, args...);
}
template <typename... Args>
inline int logError(const char* source, const char* format, Args... args) UAVCAN_NOEXCEPT
{
return log(protocol::debug::LogLevel::ERROR, source, format, args...);
}
#else
// C++03 flavor: plain text, no formatting. The level check is duplicated here
// to avoid touching msg_buf_ when the message would be filtered anyway.
int log(LogLevel level, const char* source, const char* text) UAVCAN_NOEXCEPT
{
#if UAVCAN_EXCEPTIONS
try
#endif
{
if (level >= level_ || level >= getExternalSinkLevel())
{
msg_buf_.level.value = level;
msg_buf_.source = source;
msg_buf_.text = text;
return log(msg_buf_);
}
return 0;
}
#if UAVCAN_EXCEPTIONS
catch (...)
{
return -ErrFailure;
}
#endif
}
int logDebug(const char* source, const char* text) UAVCAN_NOEXCEPT
{
return log(protocol::debug::LogLevel::DEBUG, source, text);
}
int logInfo(const char* source, const char* text) UAVCAN_NOEXCEPT
{
return log(protocol::debug::LogLevel::INFO, source, text);
}
int logWarning(const char* source, const char* text) UAVCAN_NOEXCEPT
{
return log(protocol::debug::LogLevel::WARNING, source, text);
}
int logError(const char* source, const char* text) UAVCAN_NOEXCEPT
{
return log(protocol::debug::LogLevel::ERROR, source, text);
}
#endif
/**
 * @}
 */
};
#if UAVCAN_CPP_VERSION >= UAVCAN_CPP11
// Out-of-class definition of the C++11 formatting flavor declared above.
// Builds the message in msg_buf_ via CharArrayFormatter ("%*" placeholders)
// and forwards it to log(const LogMessage&). Exceptions, if enabled, are
// swallowed and reported as -ErrFailure to honor UAVCAN_NOEXCEPT.
template <typename... Args>
int Logger::log(LogLevel level, const char* source, const char* format, Args... args) UAVCAN_NOEXCEPT
{
#if UAVCAN_EXCEPTIONS
try
#endif
{
// Skip the formatting work entirely when both sinks would filter the message.
if (level >= level_ || level >= getExternalSinkLevel())
{
msg_buf_.level.value = level;
msg_buf_.source = source;
msg_buf_.text.clear();
CharArrayFormatter<typename protocol::debug::LogMessage::FieldTypes::text> formatter(msg_buf_.text);
formatter.write(format, args...);
return log(msg_buf_);
}
return 0;
}
#if UAVCAN_EXCEPTIONS
catch (...)
{
return -ErrFailure;
}
#endif
}
#endif
}
#endif // UAVCAN_PROTOCOL_LOGGER_HPP_INCLUDED
@@ -0,0 +1,464 @@
/*
* Copyright (C) 2015 Pavel Kirienko <pavel.kirienko@gmail.com>
*/
#ifndef UAVCAN_PROTOCOL_NODE_INFO_RETRIEVER_HPP_INCLUDED
#define UAVCAN_PROTOCOL_NODE_INFO_RETRIEVER_HPP_INCLUDED
#include <uavcan/build_config.hpp>
#include <uavcan/debug.hpp>
#include <uavcan/util/multiset.hpp>
#include <uavcan/node/service_client.hpp>
#include <uavcan/node/timer.hpp>
#include <uavcan/protocol/node_status_monitor.hpp>
#include <uavcan/protocol/GetNodeInfo.hpp>
namespace uavcan
{
/**
* Classes that need to receive GetNodeInfo responses should implement this interface.
*/
class UAVCAN_EXPORT INodeInfoListener
{
public:
/**
 * Called when a response to GetNodeInfo request is received. This happens shortly after the node restarts or
 * becomes online for the first time.
 * @param node_id   Node ID of the node
 * @param response  Node info struct
 */
virtual void handleNodeInfoRetrieved(NodeID node_id, const protocol::GetNodeInfo::Response& node_info) = 0;
/**
 * Called when the retriever decides that the node does not support the GetNodeInfo service.
 * This method will never be called if the number of attempts is unlimited.
 */
virtual void handleNodeInfoUnavailable(NodeID node_id) = 0;
/**
 * This call is routed directly from @ref NodeStatusMonitor.
 * Default implementation does nothing.
 * @param event     Node status change event
 */
virtual void handleNodeStatusChange(const NodeStatusMonitor::NodeStatusChangeEvent& event)
{
(void)event;   // Optional override; intentionally a no-op by default
}
/**
 * This call is routed directly from @ref NodeStatusMonitor.
 * Default implementation does nothing.
 * @param msg       Node status message
 */
virtual void handleNodeStatusMessage(const ReceivedDataStructure<protocol::NodeStatus>& msg)
{
(void)msg;     // Optional override; intentionally a no-op by default
}
virtual ~INodeInfoListener() { }
};
/**
* This class automatically retrieves a response to GetNodeInfo once a node appears online or restarts.
* It does a number of attempts in case if there's a communication failure before assuming that the node does not
* implement the GetNodeInfo service. All parameters are pre-configured with sensible default values that should fit
* virtually any use case, but they can be overriden if needed - refer to the setter methods below for details.
*
* Defaults are pre-configured so that the class is able to query 123 nodes (node ID 1..125, where 1 is our local
* node and 1 is one node that implements GetNodeInfo service, hence 123) of which none implements GetNodeInfo
* service in under 5 seconds. The 5 second limitation is imposed by UAVCAN-compatible bootloaders, which are
* unlikely to wait for more than that before continuing to boot. In case if this default value is not appropriate
* for the end application, the request interval can be overriden via @ref setRequestInterval().
*
* Following the above explained requirements, the default request interval is defined as follows:
* request interval [ms] = floor(5000 [ms] bootloader timeout / 123 nodes)
* Which yields 40 ms.
*
* Given default service timeout 1000 ms and the defined above request frequency 40 ms, the maximum number of
* concurrent requests will be:
* max concurrent requests = ceil(1000 [ms] timeout / 40 [ms] request interval)
* Which yields 25 requests.
*
* Keep the above equations in mind when changing the default request interval.
*
* Obviously, if all calls are completing in under (request interval), the number of concurrent requests will never
* exceed one. This is actually the most likely scenario.
*
* Note that all nodes are queried in a round-robin fashion, regardless of their uptime, number of requests made, etc.
*
* Events from this class can be routed to many listeners, @ref INodeInfoListener.
*/
class UAVCAN_EXPORT NodeInfoRetriever : public NodeStatusMonitor
, TimerBase
{
public:
enum { MaxNumRequestAttempts = 254 };
enum { UnlimitedRequestAttempts = 0 };
private:
typedef MethodBinder<NodeInfoRetriever*,
void (NodeInfoRetriever::*)(const ServiceCallResult<protocol::GetNodeInfo>&)>
GetNodeInfoResponseCallback;
// Per-node bookkeeping; one entry per possible node ID.
struct Entry
{
uint32_t uptime_sec;                ///< Last observed uptime; a decrease indicates a restart
uint8_t num_attempts_made;
bool request_needed;                ///< Always false for unknown nodes
bool updated_since_last_attempt;    ///< Always false for unknown nodes
Entry()
: uptime_sec(0)
, num_attempts_made(0)
, request_needed(false)
, updated_since_last_attempt(false)
{
#if UAVCAN_DEBUG
StaticAssert<sizeof(Entry) <= 8>::check();
#endif
}
};
// Functor for Multiset::forEach — delivers a retrieved node info struct to one listener.
struct NodeInfoRetrievedHandlerCaller
{
const NodeID node_id;
const protocol::GetNodeInfo::Response& node_info;
NodeInfoRetrievedHandlerCaller(NodeID arg_node_id, const protocol::GetNodeInfo::Response& arg_node_info)
: node_id(arg_node_id)
, node_info(arg_node_info)
{ }
bool operator()(INodeInfoListener* key)
{
UAVCAN_ASSERT(key != UAVCAN_NULLPTR);
key->handleNodeInfoRetrieved(node_id, node_info);
return false;   // Returning false keeps forEach iterating over all listeners
}
};
// Generic forEach functor: invokes an arbitrary INodeInfoListener method with one argument.
template <typename Event>
struct GenericHandlerCaller
{
void (INodeInfoListener::* const method)(Event);
Event event;
GenericHandlerCaller(void (INodeInfoListener::*arg_method)(Event), Event arg_event)
: method(arg_method)
, event(arg_event)
{ }
bool operator()(INodeInfoListener* key)
{
UAVCAN_ASSERT(key != UAVCAN_NULLPTR);
(key->*method)(event);
return false;   // Keep iterating
}
};
enum { DefaultNumRequestAttempts = 16 };
enum { DefaultTimerIntervalMSec = 40 };     ///< Read explanation in the class documentation
/*
 * State
 */
Entry entries_[NodeID::Max];                // [1, NodeID::Max]
Multiset<INodeInfoListener*> listeners_;
ServiceClient<protocol::GetNodeInfo, GetNodeInfoResponseCallback> get_node_info_client_;
MonotonicDuration request_interval_;
mutable uint8_t last_picked_node_;          // Round-robin cursor; mutable so the const picker can advance it
uint8_t num_attempts_;
/*
 * Methods
 */
const Entry& getEntry(NodeID node_id) const { return const_cast<NodeInfoRetriever*>(this)->getEntry(node_id); }
// Maps node ID [1, Max] to its entry; out-of-range IDs are a fatal error.
Entry& getEntry(NodeID node_id)
{
if (node_id.get() < 1 || node_id.get() > NodeID::Max)
{
handleFatalError("NodeInfoRetriever NodeID");
}
return entries_[node_id.get() - 1];
}
void startTimerIfNotRunning()
{
if (!TimerBase::isRunning())
{
TimerBase::startPeriodic(request_interval_);
UAVCAN_TRACE("NodeInfoRetriever", "Timer started, interval %s sec", request_interval_.toString().c_str());
}
}
// Round-robin scan for the next node that needs a GetNodeInfo request and has
// published a fresh NodeStatus since the last attempt. Also reports (via the
// out parameter) whether ANY node still needs a request, so the caller can
// stop the timer when there is nothing left to do.
NodeID pickNextNodeToQuery(bool& out_at_least_one_request_needed) const
{
out_at_least_one_request_needed = false;
for (unsigned iter_cnt_ = 0; iter_cnt_ < (sizeof(entries_) / sizeof(entries_[0])); iter_cnt_++) // Round-robin
{
last_picked_node_++;
if (last_picked_node_ > NodeID::Max)
{
last_picked_node_ = 1;
}
UAVCAN_ASSERT((last_picked_node_ >= 1) &&
(last_picked_node_ <= NodeID::Max));
const Entry& entry = getEntry(last_picked_node_);
if (entry.request_needed)
{
out_at_least_one_request_needed = true;
if (entry.updated_since_last_attempt &&
!get_node_info_client_.hasPendingCallToServer(last_picked_node_))
{
UAVCAN_TRACE("NodeInfoRetriever", "Next node to query: %d", int(last_picked_node_));
return NodeID(last_picked_node_);
}
}
}
return NodeID(); // No node could be found
}
// Periodic tick: fire one GetNodeInfo call per tick; stop the timer once no
// node needs a request anymore (it is restarted ad-hoc on status events).
virtual void handleTimerEvent(const TimerEvent&)
{
bool at_least_one_request_needed = false;
const NodeID next = pickNextNodeToQuery(at_least_one_request_needed);
if (next.isUnicast())
{
UAVCAN_ASSERT(at_least_one_request_needed);
getEntry(next).updated_since_last_attempt = false;
const int res = get_node_info_client_.call(next, protocol::GetNodeInfo::Request());
if (res < 0)
{
get_node_info_client_.getNode().registerInternalFailure("NodeInfoRetriever GetNodeInfo call");
}
}
else
{
if (!at_least_one_request_needed)
{
TimerBase::stop();
UAVCAN_TRACE("NodeInfoRetriever", "Timer stopped");
}
}
}
// NodeStatusMonitor override: schedule a request when a node comes online
// (or was unknown), cancel the need when it goes offline; then fan the event
// out to all listeners.
virtual void handleNodeStatusChange(const NodeStatusChangeEvent& event)
{
const bool was_offline = !event.was_known ||
(event.old_status.mode == protocol::NodeStatus::MODE_OFFLINE);
const bool offline_now = event.status.mode == protocol::NodeStatus::MODE_OFFLINE;
if (was_offline || offline_now)
{
Entry& entry = getEntry(event.node_id);
entry.request_needed = !offline_now;
entry.num_attempts_made = 0;
UAVCAN_TRACE("NodeInfoRetriever", "Offline status change: node ID %d, request needed: %d",
int(event.node_id.get()), int(entry.request_needed));
if (entry.request_needed)
{
startTimerIfNotRunning();
}
}
listeners_.forEach(
GenericHandlerCaller<const NodeStatusChangeEvent&>(&INodeInfoListener::handleNodeStatusChange, event));
}
// NodeStatusMonitor override: an uptime decrease means the node restarted,
// so its info must be re-requested; every message marks the entry fresh.
virtual void handleNodeStatusMessage(const ReceivedDataStructure<protocol::NodeStatus>& msg)
{
Entry& entry = getEntry(msg.getSrcNodeID());
if (msg.uptime_sec < entry.uptime_sec)
{
entry.request_needed = true;
entry.num_attempts_made = 0;
startTimerIfNotRunning();
}
entry.uptime_sec = msg.uptime_sec;
entry.updated_since_last_attempt = true;
listeners_.forEach(GenericHandlerCaller<const ReceivedDataStructure<protocol::NodeStatus>&>(
&INodeInfoListener::handleNodeStatusMessage, msg));
}
// Service client callback: on success deliver the info to listeners; on
// failure count the attempt and eventually give up (unless unlimited).
void handleGetNodeInfoResponse(const ServiceCallResult<protocol::GetNodeInfo>& result)
{
Entry& entry = getEntry(result.getCallID().server_node_id);
if (result.isSuccessful())
{
/*
 * Updating the uptime here allows to properly handle a corner case where the service response arrives
 * after the device has restarted and published its new NodeStatus (although it's unlikely to happen).
 */
entry.uptime_sec = result.getResponse().status.uptime_sec;
entry.request_needed = false;
listeners_.forEach(NodeInfoRetrievedHandlerCaller(result.getCallID().server_node_id,
result.getResponse()));
}
else
{
if (num_attempts_ != UnlimitedRequestAttempts)
{
entry.num_attempts_made++;
if (entry.num_attempts_made >= num_attempts_)
{
entry.request_needed = false;
listeners_.forEach(GenericHandlerCaller<NodeID>(&INodeInfoListener::handleNodeInfoUnavailable,
result.getCallID().server_node_id));
}
}
}
}
public:
NodeInfoRetriever(INode& node)
: NodeStatusMonitor(node)
, TimerBase(node)
, listeners_(node.getAllocator())
, get_node_info_client_(node)
, request_interval_(MonotonicDuration::fromMSec(DefaultTimerIntervalMSec))
, last_picked_node_(1)
, num_attempts_(DefaultNumRequestAttempts)
{ }
/**
 * Starts the retriever.
 * Destroy the object to stop it.
 * Returns negative error code.
 */
int start(const TransferPriority priority = TransferPriority::OneHigherThanLowest)
{
int res = NodeStatusMonitor::start();
if (res < 0)
{
return res;
}
res = get_node_info_client_.init(priority);
if (res < 0)
{
return res;
}
get_node_info_client_.setCallback(GetNodeInfoResponseCallback(this,
&NodeInfoRetriever::handleGetNodeInfoResponse));
// Note: the timer will be started ad-hoc
return 0;
}
/**
 * This method forces the class to re-request uavcan.protocol.GetNodeInfo from all nodes as if they
 * have just appeared in the network.
 */
void invalidateAll()
{
NodeStatusMonitor::forgetAllNodes();
get_node_info_client_.cancelAllCalls();
for (unsigned i = 0; i < (sizeof(entries_) / sizeof(entries_[0])); i++)
{
entries_[i] = Entry();
}
// It is not necessary to reset the last picked node index
}
/**
 * Adds one listener. Does nothing if such listener already exists.
 * May return -ErrMemory if there's no space to add the listener.
 */
int addListener(INodeInfoListener* listener)
{
if (listener != UAVCAN_NULLPTR)
{
// Remove first to guarantee at most one registration per listener
removeListener(listener);
return (UAVCAN_NULLPTR == listeners_.emplace(listener)) ? -ErrMemory : 0;
}
else
{
return -ErrInvalidParam;
}
}
/**
 * Removes the listener.
 * If the listener was not registered, nothing will be done.
 */
void removeListener(INodeInfoListener* listener)
{
if (listener != UAVCAN_NULLPTR)
{
listeners_.removeAll(listener);
}
else
{
UAVCAN_ASSERT(0);
}
}
unsigned getNumListeners() const { return listeners_.getSize(); }
/**
 * Number of attempts to retrieve GetNodeInfo response before giving up on the assumption that the service is
 * not implemented.
 * Zero is a special value that can be used to set unlimited number of attempts, @ref UnlimitedRequestAttempts.
 */
uint8_t getNumRequestAttempts() const { return num_attempts_; }
void setNumRequestAttempts(const uint8_t num)
{
num_attempts_ = min(static_cast<uint8_t>(MaxNumRequestAttempts), num);
}
/**
 * Request interval also implicitly defines the maximum number of concurrent requests.
 * Read the class documentation for details.
 */
MonotonicDuration getRequestInterval() const { return request_interval_; }
void setRequestInterval(const MonotonicDuration interval)
{
if (interval.isPositive())
{
request_interval_ = interval;
if (TimerBase::isRunning())
{
TimerBase::startPeriodic(request_interval_);   // Re-arm with the new period immediately
}
}
else
{
UAVCAN_ASSERT(0);
}
}
/**
 * These methods are needed mostly for testing.
 */
bool isRetrievingInProgress() const { return TimerBase::isRunning(); }
uint8_t getNumPendingRequests() const
{
const unsigned num = get_node_info_client_.getNumPendingCalls();
UAVCAN_ASSERT(num <= 0xFF);
return static_cast<uint8_t>(num);
}
};
}
#endif // Include guard
@@ -0,0 +1,320 @@
/*
* Copyright (C) 2014 Pavel Kirienko <pavel.kirienko@gmail.com>
*/
#ifndef UAVCAN_PROTOCOL_NODE_STATUS_MONITOR_HPP_INCLUDED
#define UAVCAN_PROTOCOL_NODE_STATUS_MONITOR_HPP_INCLUDED
#include <uavcan/debug.hpp>
#include <uavcan/util/method_binder.hpp>
#include <uavcan/node/subscriber.hpp>
#include <uavcan/node/timer.hpp>
#include <uavcan/protocol/NodeStatus.hpp>
#include <cassert>
#include <cstdlib>
namespace uavcan
{
/**
* This class implements the core functionality of a network monitor.
* It can be extended by inheritance to add more complex logic, or used directly as is.
*/
class UAVCAN_EXPORT NodeStatusMonitor
{
public:
// Compact snapshot of a node's reported status (packs into one byte).
struct NodeStatus
{
uint8_t health   : 2;
uint8_t mode     : 3;
uint8_t sub_mode : 3;
// Defaults represent "worst case / offline" so unknown nodes look offline.
NodeStatus() :
health(protocol::NodeStatus::HEALTH_CRITICAL),
mode(protocol::NodeStatus::MODE_OFFLINE),
sub_mode(0)
{
StaticAssert<protocol::NodeStatus::FieldTypes::health::BitLen == 2>::check();
StaticAssert<protocol::NodeStatus::FieldTypes::mode::BitLen == 3>::check();
StaticAssert<protocol::NodeStatus::FieldTypes::sub_mode::BitLen == 3>::check();
}
bool operator!=(const NodeStatus rhs) const { return !operator==(rhs); }
// NOTE(review): equality via memcmp over a bit-field struct — correct only
// if all bits (including any padding) are initialized identically, which the
// constructor appears to guarantee since the three fields fill 8 bits; confirm
// that sizeof(NodeStatus) == 1 on all supported compilers.
bool operator==(const NodeStatus rhs) const
{
return std::memcmp(this, &rhs, sizeof(NodeStatus)) == 0;
}
#if UAVCAN_TOSTRING
std::string toString() const
{
char buf[40];
(void)snprintf(buf, sizeof(buf), "health=%d mode=%d sub_mode=%d", int(health), int(mode), int(sub_mode));
return std::string(buf);
}
#endif
};
struct NodeStatusChangeEvent
{
NodeID node_id;
NodeStatus status;       // New status
NodeStatus old_status;   // Previous status (default/offline if node was unknown)
bool was_known;          // False if this is the first observation of the node
NodeStatusChangeEvent() :
was_known(false)
{ }
};
private:
enum { TimerPeriodMs100 = 2 };   // Timer period in units of 100 ms (i.e. 200 ms)
typedef MethodBinder<NodeStatusMonitor*,
void (NodeStatusMonitor::*)(const ReceivedDataStructure<protocol::NodeStatus>&)>
NodeStatusCallback;
typedef MethodBinder<NodeStatusMonitor*, void (NodeStatusMonitor::*)(const TimerEvent&)> TimerCallback;
Subscriber<protocol::NodeStatus, NodeStatusCallback> sub_;
TimerEventForwarder<TimerCallback> timer_;
struct Entry
{
NodeStatus status;
int8_t time_since_last_update_ms100;   // In units of 100 ms; negative means "node never seen"
Entry() :
time_since_last_update_ms100(-1)
{ }
};
mutable Entry entries_[NodeID::Max];   // [1, NodeID::Max]
// Maps node ID [1, Max] to its entry; out-of-range IDs are a fatal error.
Entry& getEntry(NodeID node_id) const
{
if (node_id.get() < 1 || node_id.get() > NodeID::Max)
{
handleFatalError("NodeStatusMonitor NodeID");
}
return entries_[node_id.get() - 1];
}
// Stores the new entry; if the status actually differs, fires the
// handleNodeStatusChange() hook first with old/new values.
void changeNodeStatus(const NodeID node_id, const Entry new_entry_value)
{
Entry& entry = getEntry(node_id);
if (entry.status != new_entry_value.status)
{
NodeStatusChangeEvent event;
event.node_id = node_id;
event.old_status = entry.status;
event.status = new_entry_value.status;
event.was_known = entry.time_since_last_update_ms100 >= 0;
UAVCAN_TRACE("NodeStatusMonitor", "Node %i [%s] status change: [%s] --> [%s]", int(node_id.get()),
(event.was_known ? "known" : "new"),
event.old_status.toString().c_str(), event.status.toString().c_str());
handleNodeStatusChange(event);
}
entry = new_entry_value;
}
// Subscriber callback: masks each field to its DSDL bit width and records it.
void handleNodeStatus(const ReceivedDataStructure<protocol::NodeStatus>& msg)
{
Entry new_entry;
new_entry.time_since_last_update_ms100 = 0;
new_entry.status.health = msg.health & ((1 << protocol::NodeStatus::FieldTypes::health::BitLen) - 1);
new_entry.status.mode = msg.mode & ((1 << protocol::NodeStatus::FieldTypes::mode::BitLen) - 1);
new_entry.status.sub_mode = msg.sub_mode & ((1 << protocol::NodeStatus::FieldTypes::sub_mode::BitLen) - 1);
changeNodeStatus(msg.getSrcNodeID(), new_entry);
handleNodeStatusMessage(msg);
}
// Periodic tick: ages every known, non-offline entry and transitions it to
// MODE_OFFLINE once the protocol offline timeout is exceeded.
void handleTimerEvent(const TimerEvent&)
{
const int OfflineTimeoutMs100 = protocol::NodeStatus::OFFLINE_TIMEOUT_MS / 100;
for (uint8_t i = 1; i <= NodeID::Max; i++)
{
Entry& entry = getEntry(i);
if (entry.time_since_last_update_ms100 >= 0 &&
entry.status.mode != protocol::NodeStatus::MODE_OFFLINE)
{
entry.time_since_last_update_ms100 =
int8_t(entry.time_since_last_update_ms100 + int8_t(TimerPeriodMs100));
if (entry.time_since_last_update_ms100 > OfflineTimeoutMs100)
{
Entry new_entry_value = entry;
new_entry_value.time_since_last_update_ms100 = OfflineTimeoutMs100;   // Clamp to avoid int8 overflow
new_entry_value.status.mode = protocol::NodeStatus::MODE_OFFLINE;
changeNodeStatus(i, new_entry_value);
}
}
}
}
protected:
/**
 * Called when a node becomes online, changes status or goes offline.
 * Refer to uavcan.protocol.NodeStatus for the offline timeout value.
 * Overriding is not required.
 */
virtual void handleNodeStatusChange(const NodeStatusChangeEvent& event)
{
(void)event;
}
/**
 * Called for every received message uavcan.protocol.NodeStatus after handleNodeStatusChange(), even
 * if the status code did not change.
 * Overriding is not required.
 */
virtual void handleNodeStatusMessage(const ReceivedDataStructure<protocol::NodeStatus>& msg)
{
(void)msg;
}
public:
explicit NodeStatusMonitor(INode& node)
: sub_(node)
, timer_(node)
{ }
virtual ~NodeStatusMonitor() { }
/**
 * Starts the monitor.
 * Destroy the object to stop it.
 * Returns negative error code.
 */
int start()
{
const int res = sub_.start(NodeStatusCallback(this, &NodeStatusMonitor::handleNodeStatus));
if (res >= 0)
{
timer_.setCallback(TimerCallback(this, &NodeStatusMonitor::handleTimerEvent));
timer_.startPeriodic(MonotonicDuration::fromMSec(TimerPeriodMs100 * 100));
}
return res;
}
/**
 * Make the node unknown.
 */
void forgetNode(NodeID node_id)
{
if (node_id.isValid())
{
Entry& entry = getEntry(node_id);
entry = Entry();
}
else
{
UAVCAN_ASSERT(0);
}
}
/**
 * Make all nodes unknown.
 */
void forgetAllNodes()
{
for (unsigned i = 0; i < (sizeof(entries_) / sizeof(entries_[0])); i++)
{
entries_[i] = Entry();
}
}
/**
 * Returns status of a given node.
 * Unknown nodes are considered offline.
 * Complexity O(1).
 */
NodeStatus getNodeStatus(NodeID node_id) const
{
if (!node_id.isValid())
{
UAVCAN_ASSERT(0);
return NodeStatus();
}
const Entry& entry = getEntry(node_id);
if (entry.time_since_last_update_ms100 >= 0)
{
return entry.status;
}
else
{
return NodeStatus();   // Default-constructed status is CRITICAL/OFFLINE
}
}
/**
 * Whether the class has observed this node at least once since initialization.
 * Complexity O(1).
 */
bool isNodeKnown(NodeID node_id) const
{
if (!node_id.isValid())
{
UAVCAN_ASSERT(0);
return false;
}
return getEntry(node_id).time_since_last_update_ms100 >= 0;
}
/**
 * This helper method allows to quickly estimate the overall network health.
 * Health of the local node is not considered.
 * Returns an invalid Node ID value if there's no known nodes in the network.
 */
NodeID findNodeWithWorstHealth() const
{
NodeID nid_with_worst_health;
uint8_t worst_health = protocol::NodeStatus::HEALTH_OK;
for (uint8_t i = 1; i <= NodeID::Max; i++)
{
const NodeID nid(i);
UAVCAN_ASSERT(nid.isUnicast());
const Entry& entry = getEntry(nid);
if (entry.time_since_last_update_ms100 >= 0)
{
if (entry.status.health > worst_health || !nid_with_worst_health.isValid())
{
nid_with_worst_health = nid;
worst_health = entry.status.health;
}
}
}
return nid_with_worst_health;
}
/**
 * Calls the operator for every known node.
 * Operator signature:
 *   void (NodeID, NodeStatus)
 */
template <typename Operator>
void forEachNode(Operator op) const
{
for (uint8_t i = 1; i <= NodeID::Max; i++)
{
const NodeID nid(i);
UAVCAN_ASSERT(nid.isUnicast());
const Entry& entry = getEntry(nid);
if (entry.time_since_last_update_ms100 >= 0)
{
op(nid, entry.status);
}
}
}
};
}
#endif // UAVCAN_PROTOCOL_NODE_STATUS_MONITOR_HPP_INCLUDED
@@ -0,0 +1,174 @@
/*
* Copyright (C) 2014 Pavel Kirienko <pavel.kirienko@gmail.com>
*/
#ifndef UAVCAN_PROTOCOL_NODE_STATUS_PROVIDER_HPP_INCLUDED
#define UAVCAN_PROTOCOL_NODE_STATUS_PROVIDER_HPP_INCLUDED
#include <uavcan/node/publisher.hpp>
#include <uavcan/node/subscriber.hpp>
#include <uavcan/node/service_server.hpp>
#include <uavcan/node/timer.hpp>
#include <uavcan/util/method_binder.hpp>
#include <uavcan/build_config.hpp>
#include <uavcan/protocol/NodeStatus.hpp>
#include <uavcan/protocol/GetNodeInfo.hpp>
namespace uavcan
{
/**
* This optional interface can be implemented by the user in order to update the node status as necessary,
* immediately before the next NodeStatus message is emitted by @ref NodeStatusProvider.
*/
class IAdHocNodeStatusUpdater
{
public:
    /**
     * This method is invoked by the library from @ref NodeStatusProvider from the library's thread immediately
     * before the next NodeStatus message is transmitted. The application can implement this method to perform
     * node status updates only as necessary.
     * The application is expected to invoke the methods of @ref NodeStatusProvider to update the status
     * from this method.
     * Note that this method is only invoked when publication is happening by the timer event.
     * It will NOT be invoked if the following methods are used to trigger node status publication:
     * - @ref NodeStatusProvider::startAndPublish()
     * - @ref NodeStatusProvider::forcePublish()
     */
    virtual void updateNodeStatus() = 0;

    // Virtual destructor so implementations can be deleted through this interface pointer.
    virtual ~IAdHocNodeStatusUpdater() { }
};
/**
* Provides the status and basic information about this node to other network participants.
*
* Usually the application does not need to deal with this class directly - it's instantiated by the node class.
*
* Default values:
* - health - OK
* - mode - INITIALIZATION
*/
class UAVCAN_EXPORT NodeStatusProvider : private TimerBase
{
    // Binder type for the uavcan.protocol.GetNodeInfo service callback.
    typedef MethodBinder<NodeStatusProvider*,
                         void (NodeStatusProvider::*)(const protocol::GetNodeInfo::Request&,
                                                      protocol::GetNodeInfo::Response&)> GetNodeInfoCallback;

    // Captured once at construction; presumably used to compute the uptime reported
    // in NodeStatus (the computation is in the .cpp, not visible here).
    const MonotonicTime creation_timestamp_;

    Publisher<protocol::NodeStatus> node_status_pub_;           // Periodic NodeStatus broadcaster
    ServiceServer<protocol::GetNodeInfo, GetNodeInfoCallback> gni_srv_;  // GetNodeInfo responder

    // Holds both the live status fields and the static node info (name, versions)
    // returned from the GetNodeInfo service.
    protocol::GetNodeInfo::Response node_info_;

    IAdHocNodeStatusUpdater* ad_hoc_status_updater_;            // Optional; may be NULL

    INode& getNode() { return node_status_pub_.getNode(); }

    bool isNodeInfoInitialized() const;

    int publish();

    virtual void handleTimerEvent(const TimerEvent&) override;
    void handleGetNodeInfoRequest(const protocol::GetNodeInfo::Request&, protocol::GetNodeInfo::Response& rsp);

public:
    // Storage type of the vendor-specific status code field of uavcan.protocol.NodeStatus.
    typedef typename StorageType<typename protocol::NodeStatus::FieldTypes::vendor_specific_status_code>::Type
        VendorSpecificStatusCode;

    // Storage type of the node name field of uavcan.protocol.GetNodeInfo response.
    typedef typename StorageType<typename protocol::GetNodeInfo::Response::FieldTypes::name>::Type NodeName;

    explicit NodeStatusProvider(INode& node)
        : TimerBase(node)
        , creation_timestamp_(node.getMonotonicTime())
        , node_status_pub_(node)
        , gni_srv_(node)
        , ad_hoc_status_updater_(UAVCAN_NULLPTR)
    {
        UAVCAN_ASSERT(!creation_timestamp_.isZero());
        // Defaults per the class contract: mode INITIALIZATION, health OK.
        node_info_.status.mode = protocol::NodeStatus::MODE_INITIALIZATION;
        node_info_.status.health = protocol::NodeStatus::HEALTH_OK;
    }

    /**
     * Starts the provider and immediately broadcasts uavcan.protocol.NodeStatus.
     * Returns negative error code.
     */
    int startAndPublish(const TransferPriority priority = TransferPriority::Default);

    /**
     * Publish the message uavcan.protocol.NodeStatus right now, out of schedule.
     * Returns negative error code.
     */
    int forcePublish() { return publish(); }

    /**
     * Allows to override default publishing rate for uavcan.protocol.NodeStatus.
     * Refer to the DSDL definition of uavcan.protocol.NodeStatus to see what is the default rate.
     * Doesn't fail; if the value is outside of acceptable range, a closest valid value will be used instead.
     */
    void setStatusPublicationPeriod(uavcan::MonotonicDuration period);
    uavcan::MonotonicDuration getStatusPublicationPeriod() const;

    /**
     * Configure the optional handler that is invoked before every node status message is emitted.
     * By default no handler is installed.
     * It is allowed to pass a null pointer, that will disable the ad-hoc update feature.
     * @ref IAdHocNodeStatusUpdater
     */
    void setAdHocNodeStatusUpdater(IAdHocNodeStatusUpdater* updater);
    IAdHocNodeStatusUpdater* getAdHocNodeStatusUpdater() const { return ad_hoc_status_updater_; }

    /**
     * Local node health code control.
     */
    uint8_t getHealth() const { return node_info_.status.health; }
    void setHealth(uint8_t code);
    void setHealthOk() { setHealth(protocol::NodeStatus::HEALTH_OK); }
    void setHealthWarning() { setHealth(protocol::NodeStatus::HEALTH_WARNING); }
    void setHealthError() { setHealth(protocol::NodeStatus::HEALTH_ERROR); }
    void setHealthCritical() { setHealth(protocol::NodeStatus::HEALTH_CRITICAL); }

    /**
     * Local node mode code control.
     */
    uint8_t getMode() const { return node_info_.status.mode; }
    void setMode(uint8_t code);
    void setModeOperational() { setMode(protocol::NodeStatus::MODE_OPERATIONAL); }
    void setModeInitialization() { setMode(protocol::NodeStatus::MODE_INITIALIZATION); }
    void setModeMaintenance() { setMode(protocol::NodeStatus::MODE_MAINTENANCE); }
    void setModeSoftwareUpdate() { setMode(protocol::NodeStatus::MODE_SOFTWARE_UPDATE); }
    void setModeOffline() { setMode(protocol::NodeStatus::MODE_OFFLINE); }

    /**
     * Local node vendor-specific status code control.
     */
    void setVendorSpecificStatusCode(VendorSpecificStatusCode code);
    VendorSpecificStatusCode getVendorSpecificStatusCode() const
    {
        return node_info_.status.vendor_specific_status_code;
    }

    /**
     * Local node name control.
     * Can be set only once before the provider is started.
     * The provider will refuse to start if the node name is not set.
     */
    const NodeName& getName() const { return node_info_.name; }
    void setName(const NodeName& name);

    /**
     * Node version information.
     * Can be set only once before the provider is started.
     */
    const protocol::SoftwareVersion& getSoftwareVersion() const { return node_info_.software_version; }
    const protocol::HardwareVersion& getHardwareVersion() const { return node_info_.hardware_version; }
    void setSoftwareVersion(const protocol::SoftwareVersion& version);
    void setHardwareVersion(const protocol::HardwareVersion& version);
};
}
#endif // UAVCAN_PROTOCOL_NODE_STATUS_PROVIDER_HPP_INCLUDED
@@ -0,0 +1,94 @@
/*
* Copyright (C) 2014 Pavel Kirienko <pavel.kirienko@gmail.com>
*/
#ifndef UAVCAN_PROTOCOL_PANIC_BROADCASTER_HPP_INCLUDED
#define UAVCAN_PROTOCOL_PANIC_BROADCASTER_HPP_INCLUDED
#include <uavcan/debug.hpp>
#include <uavcan/node/publisher.hpp>
#include <uavcan/node/timer.hpp>
#include <uavcan/protocol/Panic.hpp>
namespace uavcan
{
/**
* Helper for broadcasting the message uavcan.protocol.Panic.
*/
class UAVCAN_EXPORT PanicBroadcaster : private TimerBase
{
    Publisher<protocol::Panic> pub_;
    protocol::Panic msg_;       // Cached message; reason text persists across periodic re-broadcasts

    // Broadcasts the cached Panic message once; a publication failure is reported
    // as an internal failure rather than propagated to the caller.
    void publishOnce()
    {
        const int res = pub_.broadcast(msg_);
        if (res < 0)
        {
            pub_.getNode().registerInternalFailure("Panic pub failed");
        }
    }

    // Periodic timer tick: re-broadcast the panic message until dontPanic() is called.
    virtual void handleTimerEvent(const TimerEvent&)
    {
        publishOnce();
    }

public:
    explicit PanicBroadcaster(INode& node)
        : TimerBase(node)
        , pub_(node)
    { }

    /**
     * Starts broadcasting the panic message periodically. Does not block and can't fail.
     * @param short_reason_description Short ASCII string that describes the reason of the panic.
     *                                 If the string exceeds the capacity of the reason_text field,
     *                                 it will be truncated.
     * @param broadcasting_period      Broadcasting period. Optional.
     * @param priority                 Transfer priority. Optional.
     */
    void panic(const char* short_reason_description,
               MonotonicDuration broadcasting_period = MonotonicDuration::fromMSec(100),
               const TransferPriority priority = TransferPriority::Default)
    {
        msg_.reason_text.clear();
        // Copy the reason string byte-by-byte, stopping at NUL or at field capacity.
        const char* p = short_reason_description;
        while (p && *p)
        {
            if (msg_.reason_text.size() == msg_.reason_text.capacity())
            {
                break;
            }
            msg_.reason_text.push_back(protocol::Panic::FieldTypes::reason_text::ValueType(*p));
            p++;
        }
        UAVCAN_TRACE("PanicBroadcaster", "Panicking with reason '%s'", getReason().c_str());
        // TX timeout equals the period so a stale frame never outlives its slot.
        pub_.setTxTimeout(broadcasting_period);
        pub_.setPriority(priority);
        publishOnce();                          // First broadcast happens immediately
        startPeriodic(broadcasting_period);     // Subsequent ones are driven by the timer
    }

    /**
     * Stop broadcasting immediately and clear the stored reason text.
     */
    void dontPanic() // Where's my towel
    {
        stop();
        msg_.reason_text.clear();
    }

    // True while the periodic broadcast timer is active.
    bool isPanicking() const { return isRunning(); }

    // Returns the (possibly truncated) reason text currently being broadcast.
    const typename protocol::Panic::FieldTypes::reason_text& getReason() const
    {
        return msg_.reason_text;
    }
};
}
#endif // UAVCAN_PROTOCOL_PANIC_BROADCASTER_HPP_INCLUDED
@@ -0,0 +1,128 @@
/*
* Copyright (C) 2014 Pavel Kirienko <pavel.kirienko@gmail.com>
*/
#ifndef UAVCAN_PROTOCOL_PANIC_LISTENER_HPP_INCLUDED
#define UAVCAN_PROTOCOL_PANIC_LISTENER_HPP_INCLUDED
#include <cassert>
#include <uavcan/debug.hpp>
#include <uavcan/build_config.hpp>
#include <uavcan/node/subscriber.hpp>
#include <uavcan/util/method_binder.hpp>
#include <uavcan/protocol/Panic.hpp>
#if !defined(UAVCAN_CPP_VERSION) || !defined(UAVCAN_CPP11)
# error UAVCAN_CPP_VERSION
#endif
#if UAVCAN_CPP_VERSION >= UAVCAN_CPP11
# include <functional>
#endif
namespace uavcan
{
/**
* This class implements proper panic detector.
* Refer to uavcan.protocol.Panic for details.
* The listener can be stopped from the callback.
*
* @tparam Callback Possible callback prototypes:
* void (const ReceivedDataStructure<protocol::Panic>&)
* void (const protocol::Panic&)
*/
template <
#if UAVCAN_CPP_VERSION >= UAVCAN_CPP11
          typename Callback = std::function<void (const ReceivedDataStructure<protocol::Panic>&)>
#else
          typename Callback = void (*)(const ReceivedDataStructure<protocol::Panic>&)
#endif
          >
class UAVCAN_EXPORT PanicListener : Noncopyable
{
    typedef MethodBinder<PanicListener*, void (PanicListener::*)(const ReceivedDataStructure<protocol::Panic>&)>
        PanicMsgCallback;

    Subscriber<protocol::Panic, PanicMsgCallback> sub_;
    MonotonicTime prev_msg_timestamp_;      // Timestamp of the last received Panic message; zero when idle
    Callback callback_;
    uint8_t num_subsequent_msgs_;           // Count of back-to-back Panic messages within MAX_INTERVAL_MS

    // Invokes the user callback; coerceOrFallback guards against an empty/invalid callback
    // (which would be a logic error since start() rejects invalid callbacks).
    void invokeCallback(const ReceivedDataStructure<protocol::Panic>& msg)
    {
        if (coerceOrFallback<bool>(callback_, true))
        {
            callback_(msg);
        }
        else
        {
            UAVCAN_ASSERT(0); // This is a logic error because normally we shouldn't start with an invalid callback
            sub_.getNode().registerInternalFailure("PanicListener invalid callback");
        }
    }

    // Debounces incoming Panic messages: the callback fires only after MIN_MESSAGES
    // have been received with no gap longer than MAX_INTERVAL_MS between them.
    void handleMsg(const ReceivedDataStructure<protocol::Panic>& msg)
    {
        UAVCAN_TRACE("PanicListener", "Received panic from snid=%i reason=%s",
                     int(msg.getSrcNodeID().get()), msg.reason_text.c_str());
        if (prev_msg_timestamp_.isZero())
        {
            // First message of a potential panic sequence.
            num_subsequent_msgs_ = 1;
            prev_msg_timestamp_ = msg.getMonotonicTimestamp();
        }
        else
        {
            const MonotonicDuration diff = msg.getMonotonicTimestamp() - prev_msg_timestamp_;
            UAVCAN_ASSERT(diff.isPositive() || diff.isZero());
            if (diff.toMSec() > protocol::Panic::MAX_INTERVAL_MS)
            {
                num_subsequent_msgs_ = 1;   // Gap too long - restart the sequence
            }
            else
            {
                num_subsequent_msgs_++;
            }
            prev_msg_timestamp_ = msg.getMonotonicTimestamp();
            if (num_subsequent_msgs_ >= protocol::Panic::MIN_MESSAGES)
            {
                // Clamp the counter so it never overflows during a long panic.
                num_subsequent_msgs_ = protocol::Panic::MIN_MESSAGES;
                invokeCallback(msg); // The application can stop us from the callback
            }
        }
    }

public:
    explicit PanicListener(INode& node)
        : sub_(node)
        , callback_()
        , num_subsequent_msgs_(0)
    { }

    /**
     * Start the listener.
     * Once started it does not require further attention.
     * Returns negative error code.
     */
    int start(const Callback& callback)
    {
        stop();
        if (!coerceOrFallback<bool>(callback, true))
        {
            UAVCAN_TRACE("PanicListener", "Invalid callback");
            return -ErrInvalidParam;
        }
        callback_ = callback;
        return sub_.start(PanicMsgCallback(this, &PanicListener::handleMsg));
    }

    // Stops the subscription and resets the debounce state so a later start() begins fresh.
    void stop()
    {
        sub_.stop();
        num_subsequent_msgs_ = 0;
        prev_msg_timestamp_ = MonotonicTime();
    }
};
}
#endif // UAVCAN_PROTOCOL_PANIC_LISTENER_HPP_INCLUDED
@@ -0,0 +1,192 @@
/*
* Copyright (C) 2014 Pavel Kirienko <pavel.kirienko@gmail.com>
*/
#ifndef UAVCAN_PROTOCOL_PARAM_SERVER_HPP_INCLUDED
#define UAVCAN_PROTOCOL_PARAM_SERVER_HPP_INCLUDED
#include <uavcan/debug.hpp>
#include <uavcan/protocol/param/GetSet.hpp>
#include <uavcan/protocol/param/ExecuteOpcode.hpp>
#include <uavcan/node/service_server.hpp>
#include <uavcan/util/method_binder.hpp>
#include <cassert>
#include <cstdlib>
namespace uavcan
{
/**
* Implement this interface in the application to support the standard remote reconfiguration services.
* Refer to @ref ParamServer.
*/
class UAVCAN_EXPORT IParamManager
{
public:
    // Storage types of the relevant DSDL fields, exposed for implementer convenience.
    typedef typename StorageType<typename protocol::param::GetSet::Response::FieldTypes::name>::Type Name;
    typedef typename StorageType<typename protocol::param::GetSet::Request::FieldTypes::index>::Type Index;
    typedef protocol::param::Value Value;
    typedef protocol::param::NumericValue NumericValue;

    virtual ~IParamManager() { }

    /**
     * Copy the parameter name to @ref out_name if it exists, otherwise do nothing.
     */
    virtual void getParamNameByIndex(Index index, Name& out_name) const = 0;

    /**
     * Assign by name if exists.
     */
    virtual void assignParamValue(const Name& name, const Value& value) = 0;

    /**
     * Read by name if exists, otherwise do nothing.
     */
    virtual void readParamValue(const Name& name, Value& out_value) const = 0;

    /**
     * Read param's default/max/min if available.
     * Note that min/max are only applicable to numeric params.
     * Implementation is optional; the default implementation leaves the outputs untouched.
     */
    virtual void readParamDefaultMaxMin(const Name& name, Value& out_default,
                                        NumericValue& out_max, NumericValue& out_min) const
    {
        (void)name;
        (void)out_default;
        (void)out_max;
        (void)out_min;
    }

    /**
     * Save all params to non-volatile storage.
     * @return Negative if failed.
     */
    virtual int saveAllParams() = 0;

    /**
     * Clear the non-volatile storage.
     * @return Negative if failed.
     */
    virtual int eraseAllParams() = 0;
};
/**
* Convenience class for supporting the standard configuration services.
* Highly recommended to use.
*/
class UAVCAN_EXPORT ParamServer
{
    typedef MethodBinder<ParamServer*, void (ParamServer::*)(const protocol::param::GetSet::Request&,
                                                             protocol::param::GetSet::Response&)> GetSetCallback;
    typedef MethodBinder<ParamServer*,
                         void (ParamServer::*)(const protocol::param::ExecuteOpcode::Request&,
                                               protocol::param::ExecuteOpcode::Response&)> ExecuteOpcodeCallback;

    ServiceServer<protocol::param::GetSet, GetSetCallback> get_set_srv_;
    ServiceServer<protocol::param::ExecuteOpcode, ExecuteOpcodeCallback> save_erase_srv_;
    IParamManager* manager_;    // Non-owning; set in start(), never NULL while serving

    // Handles uavcan.protocol.param.GetSet: resolves the parameter by name or index,
    // optionally assigns a new value, and reads the current value back into the response.
    // An empty response name signals "parameter unknown" to the caller.
    void handleGetSet(const protocol::param::GetSet::Request& in, protocol::param::GetSet::Response& out)
    {
        UAVCAN_ASSERT(manager_ != UAVCAN_NULLPTR);

        // Recover the name from index
        if (in.name.empty())
        {
            manager_->getParamNameByIndex(in.index, out.name);
            UAVCAN_TRACE("ParamServer", "GetSet: Index %i --> '%s'", int(in.index), out.name.c_str());
        }
        else
        {
            out.name = in.name;
        }

        if (out.name.empty())
        {
            UAVCAN_TRACE("ParamServer", "GetSet: Can't resolve parameter name, index=%i", int(in.index));
            return;     // Response name stays empty -> "no such parameter"
        }

        // Assign if needed, read back
        if (!in.value.is(protocol::param::Value::Tag::empty))
        {
            manager_->assignParamValue(out.name, in.value);
        }
        manager_->readParamValue(out.name, out.value);

        // Check if the value is OK, otherwise reset the name to indicate that the parameter is unknown
        if (!out.value.is(protocol::param::Value::Tag::empty))
        {
            manager_->readParamDefaultMaxMin(out.name, out.default_value, out.max_value, out.min_value);
        }
        else
        {
            UAVCAN_TRACE("ParamServer", "GetSet: Unknown param: index=%i name='%s'", int(in.index), out.name.c_str());
            out.name.clear();
        }
    }

    // Handles uavcan.protocol.param.ExecuteOpcode: dispatches SAVE/ERASE to the manager;
    // any other opcode is rejected with ok=false.
    void handleExecuteOpcode(const protocol::param::ExecuteOpcode::Request& in,
                             protocol::param::ExecuteOpcode::Response& out)
    {
        UAVCAN_ASSERT(manager_ != UAVCAN_NULLPTR);

        if (in.opcode == protocol::param::ExecuteOpcode::Request::OPCODE_SAVE)
        {
            out.ok = manager_->saveAllParams() >= 0;
        }
        else if (in.opcode == protocol::param::ExecuteOpcode::Request::OPCODE_ERASE)
        {
            out.ok = manager_->eraseAllParams() >= 0;
        }
        else
        {
            UAVCAN_TRACE("ParamServer", "ExecuteOpcode: invalid opcode %i", int(in.opcode));
            out.ok = false;
        }
    }

public:
    explicit ParamServer(INode& node)
        : get_set_srv_(node)
        , save_erase_srv_(node)
        , manager_(UAVCAN_NULLPTR)
    { }

    /**
     * Starts the parameter server with given param manager instance.
     * Returns negative error code.
     */
    int start(IParamManager* manager)
    {
        if (manager == UAVCAN_NULLPTR)
        {
            return -ErrInvalidParam;
        }
        manager_ = manager;

        int res = get_set_srv_.start(GetSetCallback(this, &ParamServer::handleGetSet));
        if (res < 0)
        {
            return res;
        }
        res = save_erase_srv_.start(ExecuteOpcodeCallback(this, &ParamServer::handleExecuteOpcode));
        if (res < 0)
        {
            get_set_srv_.stop();    // Roll back the first server so the object stays consistent
        }
        return res;
    }

    /**
     * @ref IParamManager
     */
    IParamManager* getParamManager() const { return manager_; }
};
}
#endif // UAVCAN_PROTOCOL_PARAM_SERVER_HPP_INCLUDED
@@ -0,0 +1,92 @@
/*
* Copyright (C) 2014 Pavel Kirienko <pavel.kirienko@gmail.com>
*/
#ifndef UAVCAN_PROTOCOL_RESTART_REQUEST_SERVER_HPP_INCLUDED
#define UAVCAN_PROTOCOL_RESTART_REQUEST_SERVER_HPP_INCLUDED
#include <uavcan/debug.hpp>
#include <uavcan/node/service_server.hpp>
#include <uavcan/util/method_binder.hpp>
#include <uavcan/protocol/RestartNode.hpp>
namespace uavcan
{
/**
* Implement this interface in the application to support the standard node restart service.
*/
class UAVCAN_EXPORT IRestartRequestHandler
{
public:
    virtual ~IRestartRequestHandler() { }

    /**
     * This method shall do either:
     * - restart the local node immediately;
     * - initiate the restart procedure to complete it asynchronously;
     * - reject the restart request and return false.
     *
     * If the restart request was accepted, this method shall either return true or don't return at all.
     */
    virtual bool handleRestartRequest(NodeID request_source) = 0;
};
/**
* Convenience class for supporting the standard node restart service.
* Highly recommended to use.
*/
class UAVCAN_EXPORT RestartRequestServer : Noncopyable
{
    typedef MethodBinder<const RestartRequestServer*,
                         void (RestartRequestServer::*)(const ReceivedDataStructure<protocol::RestartNode::Request>&,
                                                        protocol::RestartNode::Response&) const> RestartNodeCallback;

    ServiceServer<protocol::RestartNode, RestartNodeCallback> srv_;
    IRestartRequestHandler* handler_;   // Non-owning; NULL means all requests are rejected

    // Service callback: validates the magic number (guards against accidental restarts),
    // then delegates the accept/reject decision to the installed handler, if any.
    void handleRestartNode(const ReceivedDataStructure<protocol::RestartNode::Request>& request,
                           protocol::RestartNode::Response& response) const
    {
        UAVCAN_TRACE("RestartRequestServer", "Request from snid=%i", int(request.getSrcNodeID().get()));
        response.ok = false;
        if (request.magic_number == protocol::RestartNode::Request::MAGIC_NUMBER)
        {
            if (handler_)
            {
                response.ok = handler_->handleRestartRequest(request.getSrcNodeID());
            }
            UAVCAN_TRACE("RestartRequestServer", "%s", (response.ok ? "Accepted" : "Rejected"));
        }
        else
        {
            UAVCAN_TRACE("RestartRequestServer", "Invalid magic number 0x%llx",
                         static_cast<unsigned long long>(request.magic_number));
        }
    }

public:
    explicit RestartRequestServer(INode& node)
        : srv_(node)
        , handler_(UAVCAN_NULLPTR)
    { }

    /**
     * Restart request handler configuration.
     * All restart requests will be explicitly rejected if there's no handler installed.
     */
    IRestartRequestHandler* getHandler() const { return handler_; }
    void setHandler(IRestartRequestHandler* handler) { handler_ = handler; }

    /**
     * Starts the server.
     * Returns negative error code.
     */
    int start()
    {
        return srv_.start(RestartNodeCallback(this, &RestartRequestServer::handleRestartNode));
    }
};
}
#endif // UAVCAN_PROTOCOL_RESTART_REQUEST_SERVER_HPP_INCLUDED
@@ -0,0 +1,65 @@
/*
* Copyright (C) 2014 Pavel Kirienko <pavel.kirienko@gmail.com>
*/
#ifndef UAVCAN_PROTOCOL_TRANSPORT_STATS_PROVIDER_HPP_INCLUDED
#define UAVCAN_PROTOCOL_TRANSPORT_STATS_PROVIDER_HPP_INCLUDED
#include <uavcan/build_config.hpp>
#include <uavcan/node/service_server.hpp>
#include <uavcan/util/method_binder.hpp>
#include <uavcan/protocol/GetTransportStats.hpp>
namespace uavcan
{
/**
* This class provides statistics about the transport layer performance on the local node.
* The user's application does not deal with this class directly because it's instantiated by the node class.
*/
class UAVCAN_EXPORT TransportStatsProvider : Noncopyable
{
    typedef MethodBinder<const TransportStatsProvider*,
                         void (TransportStatsProvider::*)(const protocol::GetTransportStats::Request&,
                                                          protocol::GetTransportStats::Response&) const>
        GetTransportStatsCallback;

    ServiceServer<protocol::GetTransportStats, GetTransportStatsCallback> srv_;

    // Service callback: snapshots the dispatcher's transfer counters plus the
    // per-interface CAN counters into the response. The request carries no data.
    void handleGetTransportStats(const protocol::GetTransportStats::Request&,
                                 protocol::GetTransportStats::Response& resp) const
    {
        const TransferPerfCounter& perf = srv_.getNode().getDispatcher().getTransferPerfCounter();
        resp.transfer_errors = perf.getErrorCount();
        resp.transfers_tx = perf.getTxTransferCount();
        resp.transfers_rx = perf.getRxTransferCount();

        // One CANIfaceStats entry per available CAN interface, in interface order.
        const CanIOManager& canio = srv_.getNode().getDispatcher().getCanIOManager();
        for (uint8_t i = 0; i < canio.getNumIfaces(); i++)
        {
            const CanIfacePerfCounters can_perf = canio.getIfacePerfCounters(i);
            protocol::CANIfaceStats stats;
            stats.errors = can_perf.errors;
            stats.frames_tx = can_perf.frames_tx;
            stats.frames_rx = can_perf.frames_rx;
            resp.can_iface_stats.push_back(stats);
        }
    }

public:
    explicit TransportStatsProvider(INode& node)
        : srv_(node)
    { }

    /**
     * Once started, this class requires no further attention.
     * Returns negative error code.
     */
    int start()
    {
        return srv_.start(GetTransportStatsCallback(this, &TransportStatsProvider::handleGetTransportStats));
    }
};
}
#endif // UAVCAN_PROTOCOL_TRANSPORT_STATS_PROVIDER_HPP_INCLUDED
@@ -0,0 +1,86 @@
/*
* Copyright (C) 2014 Pavel Kirienko <pavel.kirienko@gmail.com>
*/
#ifndef UAVCAN_STD_HPP_INCLUDED
#define UAVCAN_STD_HPP_INCLUDED
#include <uavcan/build_config.hpp>
#include <cstdarg>
#include <cstddef>
#if !defined(UAVCAN_CPP_VERSION) || !defined(UAVCAN_CPP11)
# error UAVCAN_CPP_VERSION
#endif
#if UAVCAN_CPP_VERSION >= UAVCAN_CPP11

// C++11 and newer: pull the fixed-width integer types from <cstdint> (std namespace).
# include <cstdint>
# include <cstdio>
namespace uavcan
{
typedef std::uint8_t uint8_t;
typedef std::uint16_t uint16_t;
typedef std::uint32_t uint32_t;
typedef std::uint64_t uint64_t;
typedef std::int8_t int8_t;
typedef std::int16_t int16_t;
typedef std::int32_t int32_t;
typedef std::int64_t int64_t;
}

#else

# include <stdint.h> // Standard integer types from C library
# include <stdio.h>  // vsnprintf() from the C library
// Pre-C++11: the C headers declare these in the global namespace.
namespace uavcan
{
typedef ::uint8_t uint8_t;
typedef ::uint16_t uint16_t;
typedef ::uint32_t uint32_t;
typedef ::uint64_t uint64_t;
typedef ::int8_t int8_t;
typedef ::int16_t int16_t;
typedef ::int32_t int32_t;
typedef ::int64_t int64_t;
}

#endif
namespace uavcan
{
/**
* Wrapper over the standard snprintf(). This wrapper is needed because different standards and different
* implementations of C++ do not agree whether snprintf() should be defined in std:: or in ::. The solution
* is to use 'using namespace std' hack inside the wrapper, so the compiler will be able to pick whatever
* definition is available in the standard library. Alternatively, the user's application can provide a
* custom implementation of uavcan::snprintf().
*/
#if __GNUC__
__attribute__ ((format(printf, 3, 4)))  // Let GCC type-check the format string against the varargs
#endif
extern int snprintf(char* out, std::size_t maxlen, const char* format, ...);
#if !UAVCAN_USE_EXTERNAL_SNPRINTF
/**
 * Default implementation of uavcan::snprintf(), used unless the application
 * provides its own (UAVCAN_USE_EXTERNAL_SNPRINTF). Delegates to vsnprintf()
 * and returns its result (the number of characters that would have been
 * written, excluding the terminator).
 */
inline int snprintf(char* out, std::size_t maxlen, const char* format, ...)
{
    using namespace std;        // Resolves vsnprintf() from either std:: or the global namespace
    va_list arg_list;
    va_start(arg_list, format);
    const int num_chars = vsnprintf(out, maxlen, format, arg_list);
    va_end(arg_list);
    return num_chars;
}
#endif
}
#endif // UAVCAN_STD_HPP_INCLUDED
@@ -0,0 +1,289 @@
/*
* Copyright (C) 2014 Pavel Kirienko <pavel.kirienko@gmail.com>
*/
#ifndef UAVCAN_TIME_HPP_INCLUDED
#define UAVCAN_TIME_HPP_INCLUDED
#include <cstdio>
#include <uavcan/std.hpp>
#include <uavcan/build_config.hpp>
#include <uavcan/util/templates.hpp>
#include <uavcan/Timestamp.hpp>
namespace uavcan
{
/**
 * CRTP base for signed time durations with microsecond resolution.
 * D is the concrete duration type (e.g. MonotonicDuration); the CRTP scheme
 * keeps different duration kinds mutually incompatible at compile time.
 */
template <typename D>
class DurationBase
{
    int64_t usec_;  // Signed duration in microseconds

protected:
    // Protected non-virtual destructor: this base is never deleted polymorphically.
    ~DurationBase() { }

    DurationBase()
        : usec_(0)
    {
        StaticAssert<(sizeof(D) == 8)>::check();    // Derived type must add no state
    }

public:
    // Largest representable duration, used as "infinite".
    static D getInfinite() { return fromUSec(NumericTraits<int64_t>::max()); }

    static D fromUSec(int64_t us)
    {
        D d;
        d.usec_ = us;
        return d;
    }
    static D fromMSec(int64_t ms) { return fromUSec(ms * 1000); }

    int64_t toUSec() const { return usec_; }
    int64_t toMSec() const { return usec_ / 1000; }     // Truncates toward zero

    D getAbs() const { return D::fromUSec((usec_ < 0) ? (-usec_) : usec_); }

    bool isPositive() const { return usec_ > 0; }
    bool isNegative() const { return usec_ < 0; }
    bool isZero() const { return usec_ == 0; }

    bool operator==(const D& r) const { return usec_ == r.usec_; }
    bool operator!=(const D& r) const { return !operator==(r); }

    bool operator<(const D& r) const { return usec_ < r.usec_; }
    bool operator>(const D& r) const { return usec_ > r.usec_; }
    bool operator<=(const D& r) const { return usec_ <= r.usec_; }
    bool operator>=(const D& r) const { return usec_ >= r.usec_; }

    D operator+(const D& r) const { return fromUSec(usec_ + r.usec_); } // TODO: overflow check
    D operator-(const D& r) const { return fromUSec(usec_ - r.usec_); } // ditto
    D operator-() const { return fromUSec(-usec_); }

    D& operator+=(const D& r)
    {
        *this = *this + r;
        return *static_cast<D*>(this);
    }
    D& operator-=(const D& r)
    {
        *this = *this - r;
        return *static_cast<D*>(this);
    }

    template <typename Scale>
    D operator*(Scale scale) const { return fromUSec(usec_ * scale); }

    template <typename Scale>
    D& operator*=(Scale scale)
    {
        *this = *this * scale;
        return *static_cast<D*>(this);
    }

    static const unsigned StringBufSize = 32;
    void toString(char buf[StringBufSize]) const; ///< Prints time in seconds with microsecond resolution

#if UAVCAN_TOSTRING
    std::string toString() const; ///< Prints time in seconds with microsecond resolution
#endif
};
/**
 * CRTP base for absolute time points with microsecond resolution.
 * T is the concrete time type, D its matching duration type; pairing them at
 * the type level prevents mixing e.g. monotonic and UTC arithmetic.
 * Time is unsigned; arithmetic with durations saturates at [0, uint64 max]
 * instead of wrapping (see operator+).
 */
template <typename T, typename D>
class TimeBase
{
    uint64_t usec_;     // Absolute time in microseconds since an unspecified epoch

protected:
    // Protected non-virtual destructor: this base is never deleted polymorphically.
    ~TimeBase() { }

    TimeBase()
        : usec_(0)
    {
        StaticAssert<(sizeof(T) == 8)>::check();    // Derived types must add no state
        StaticAssert<(sizeof(D) == 8)>::check();
    }

public:
    static T getMax() { return fromUSec(NumericTraits<uint64_t>::max()); }

    static T fromUSec(uint64_t us)
    {
        T d;
        d.usec_ = us;
        return d;
    }
    static T fromMSec(uint64_t ms) { return fromUSec(ms * 1000); }

    uint64_t toUSec() const { return usec_; }
    uint64_t toMSec() const { return usec_ / 1000; }    // Truncates

    bool isZero() const { return usec_ == 0; }

    bool operator==(const T& r) const { return usec_ == r.usec_; }
    bool operator!=(const T& r) const { return !operator==(r); }

    bool operator<(const T& r) const { return usec_ < r.usec_; }
    bool operator>(const T& r) const { return usec_ > r.usec_; }
    bool operator<=(const T& r) const { return usec_ <= r.usec_; }
    bool operator>=(const T& r) const { return usec_ >= r.usec_; }

    // Saturating addition of a signed duration:
    // clamps to zero on underflow and to uint64 max on overflow.
    T operator+(const D& r) const
    {
        if (r.isNegative())
        {
            if (uint64_t(r.getAbs().toUSec()) > usec_)
            {
                return fromUSec(0);                             // Would go below the epoch - clamp to zero
            }
        }
        else
        {
            if (uint64_t(int64_t(usec_) + r.toUSec()) < usec_)
            {
                return fromUSec(NumericTraits<uint64_t>::max());    // Wrapped around - clamp to max
            }
        }
        return fromUSec(uint64_t(int64_t(usec_) + r.toUSec()));
    }

    // Subtracting a duration is adding its negation (inherits the saturation above).
    T operator-(const D& r) const
    {
        return *static_cast<const T*>(this) + (-r);
    }

    // Difference of two time points yields a signed duration.
    D operator-(const T& r) const
    {
        return D::fromUSec((usec_ > r.usec_) ? int64_t(usec_ - r.usec_) : -int64_t(r.usec_ - usec_));
    }

    T& operator+=(const D& r)
    {
        *this = *this + r;
        return *static_cast<T*>(this);
    }
    T& operator-=(const D& r)
    {
        *this = *this - r;
        return *static_cast<T*>(this);
    }

    static const unsigned StringBufSize = 32;
    void toString(char buf[StringBufSize]) const; ///< Prints time in seconds with microsecond resolution

#if UAVCAN_TOSTRING
    std::string toString() const; ///< Prints time in seconds with microsecond resolution
#endif
};
/*
* Monotonic
*/
// Monotonic clock: never jumps, suitable for timeouts and scheduling.
class UAVCAN_EXPORT MonotonicDuration : public DurationBase<MonotonicDuration> { };
class UAVCAN_EXPORT MonotonicTime : public TimeBase<MonotonicTime, MonotonicDuration> { };
/*
 * UTC - wall-clock time, may be adjusted (e.g. by network time synchronization).
 */
class UAVCAN_EXPORT UtcDuration : public DurationBase<UtcDuration> { };
class UAVCAN_EXPORT UtcTime : public TimeBase<UtcTime, UtcDuration> /// Implicitly convertible to/from uavcan.Timestamp
{
public:
    UtcTime() { }

    // Implicit by design: lets DSDL uavcan.Timestamp fields be used directly as UtcTime.
    UtcTime(const Timestamp& ts) // Implicit
    {
        operator=(ts);
    }

    UtcTime& operator=(const Timestamp& ts)
    {
        *this = fromUSec(ts.usec);
        return *this;
    }

    // Implicit conversion back to the DSDL Timestamp type.
    operator Timestamp() const
    {
        Timestamp ts;
        ts.usec = toUSec();
        return ts;
    }
};
// ----------------------------------------------------------------------------
// Out-of-class definitions of the static constants so they can be odr-used
// (e.g. passed by reference); required prior to C++17 inline variables.
template <typename D>
const unsigned DurationBase<D>::StringBufSize;

template <typename T, typename D>
const unsigned TimeBase<T, D>::StringBufSize;
/**
 * Renders the duration as "[-]seconds.microseconds" into the caller-provided buffer.
 * The sign is emitted manually so the numeric part can be formatted from the
 * absolute value with unsigned conversions.
 */
template <typename D>
void DurationBase<D>::toString(char buf[StringBufSize]) const
{
    char* ptr = buf;
    if (isNegative())
    {
        *ptr++ = '-';
    }
    // NOTE(review): StringBufSize - 1 reserves one byte for the optional sign,
    // but is also passed when no sign was written - presumably a conservative
    // bound rather than a bug; verify if exact buffer usage ever matters.
    (void)snprintf(ptr, StringBufSize - 1, "%llu.%06lu",
                   static_cast<unsigned long long>(getAbs().toUSec() / 1000000L),
                   static_cast<unsigned long>(getAbs().toUSec() % 1000000L));
}
/**
 * Renders the time point as "seconds.microseconds" into the caller-provided buffer.
 * Time is unsigned, so no sign handling is needed here.
 */
template <typename T, typename D>
void TimeBase<T, D>::toString(char buf[StringBufSize]) const
{
    (void)snprintf(buf, StringBufSize, "%llu.%06lu",
                   static_cast<unsigned long long>(toUSec() / 1000000UL),
                   static_cast<unsigned long>(toUSec() % 1000000UL));
}
#if UAVCAN_TOSTRING
// std::string convenience overloads; compiled only when UAVCAN_TOSTRING is
// enabled since embedded builds typically exclude std::string.
template <typename D>
std::string DurationBase<D>::toString() const
{
    char buf[StringBufSize];
    toString(buf);      // Delegate to the char-buffer overload
    return std::string(buf);
}

template <typename T, typename D>
std::string TimeBase<T, D>::toString() const
{
    char buf[StringBufSize];
    toString(buf);      // Delegate to the char-buffer overload
    return std::string(buf);
}
#endif
// Stream insertion helpers. Stream is a template parameter (rather than
// std::ostream) so these work without <iostream> on embedded builds.
template <typename Stream, typename D>
UAVCAN_EXPORT
Stream& operator<<(Stream& s, const DurationBase<D>& d)
{
    char buf[DurationBase<D>::StringBufSize];
    d.toString(buf);
    s << buf;
    return s;
}

template <typename Stream, typename T, typename D>
UAVCAN_EXPORT
Stream& operator<<(Stream& s, const TimeBase<T, D>& t)
{
    char buf[TimeBase<T, D>::StringBufSize];
    t.toString(buf);
    s << buf;
    return s;
}
}
#endif // UAVCAN_TIME_HPP_INCLUDED
@@ -0,0 +1,27 @@
/*
* Copyright (C) 2014 Pavel Kirienko <pavel.kirienko@gmail.com>
*/
#ifndef UAVCAN_TRANSPORT_ABSTRACT_TRANSFER_BUFFER_HPP_INCLUDED
#define UAVCAN_TRANSPORT_ABSTRACT_TRANSFER_BUFFER_HPP_INCLUDED
#include <uavcan/build_config.hpp>
#include <uavcan/std.hpp>
namespace uavcan
{
/**
* API for transfer buffer users.
*/
class UAVCAN_EXPORT ITransferBuffer
{
public:
    virtual ~ITransferBuffer() { }

    // Reads up to 'len' bytes starting at 'offset' into 'data'; returns the byte
    // count or a negative error code (contract per implementations elsewhere).
    virtual int read(unsigned offset, uint8_t* data, unsigned len) const = 0;

    // Writes 'len' bytes from 'data' starting at 'offset'; returns the byte
    // count or a negative error code.
    virtual int write(unsigned offset, const uint8_t* data, unsigned len) = 0;
};
}
#endif // UAVCAN_TRANSPORT_ABSTRACT_TRANSFER_BUFFER_HPP_INCLUDED
@@ -0,0 +1,171 @@
/*
* Copyright (C) 2015 Pavel Kirienko <pavel.kirienko@gmail.com>,
* Ilia Sheremet <illia.sheremet@gmail.com>
*/
#ifndef UAVCAN_TRANSPORT_CAN_ACCEPTANCE_FILTER_CONFIGURATOR_HPP_INCLUDED
#define UAVCAN_TRANSPORT_CAN_ACCEPTANCE_FILTER_CONFIGURATOR_HPP_INCLUDED
#include <cassert>
#include <uavcan/data_type.hpp>
#include <uavcan/error.hpp>
#include <uavcan/transport/dispatcher.hpp>
#include <uavcan/node/abstract_node.hpp>
#include <uavcan/build_config.hpp>
#include <uavcan/util/multiset.hpp>
namespace uavcan
{
/**
* This class configures hardware acceptance filters (if this feature is present on the particular CAN driver) to
* preclude reception of irrelevant CAN frames on the hardware level.
*
* Configuration starts by creating an object of class @ref CanAcceptanceFilterConfigurator on the stack.
* By means of computeConfiguration() method the class determines the number of available HW filters and the number
* of listeners. In case if custom configuration required, it is possible to add it through addFilterConfig().
* Subsequently obtained configurations are then loaded into the CAN driver by calling the applyConfiguration() method.
* If the cumulative number of configurations obtained by computeConfiguration() and addFilterConfig() is higher than
* the number of available HW filters, configurations will be merged automatically in the most efficient way.
*
* Note that if the application adds additional server or subscriber objects after the filters have been configured,
* the configuration procedure will have to be performed again.
*
* The maximum number of CAN acceptance filters is predefined in uavcan/build_config.hpp through a constant
* @ref MaxCanAcceptanceFilters. The algorithm doesn't allow to have higher number of HW filters configurations than
* defined by MaxCanAcceptanceFilters. You can change this value according to the number specified in your CAN driver
* datasheet.
*/
class CanAcceptanceFilterConfigurator
{
public:
    /**
     * Defines whether the computed acceptance filter configuration accepts anonymous messages or not.
     */
    enum AnonymousMessages
    {
        AcceptAnonymousMessages,
        IgnoreAnonymousMessages
    };

private:
    /**
     * Below constants based on UAVCAN transport layer specification. Masks and ID's depend on message
     * TypeID, TransferID (RequestNotResponse - for service types, ServiceNotMessage - for all types of messages).
     * For more details refer to uavcan.org/CAN_bus_transport_layer_specification.
     * For clarity let's represent "i" as Data Type ID and "d" as Destination Node Id
     * DefaultFilterMsgMask     = 00000 11111111 11111111 10000000
     * DefaultFilterMsgID       = 00000 iiiiiiii iiiiiiii 00000000, no need to explicitly define, since MsgID
     * initialized as 0.
     * DefaultFilterServiceMask = 00000 00000000 01111111 10000000
     * DefaultFilterServiceID   = 00000 00000000 0ddddddd 10000000, all Service Response Frames are accepted by
     * HW filters.
     * DefaultAnonMsgMask       = 00000 00000000 00000000 11111111
     * DefaultAnonMsgID         = 00000 00000000 00000000 00000000, by default the config is added to accept all
     * anonymous frames. In case there are no anonymous messages, invoke
     * computeConfiguration(IgnoreAnonymousMessages).
     */
    static const unsigned DefaultFilterMsgMask = 0xFFFF80;
    static const unsigned DefaultFilterServiceMask = 0x7F80;
    static const unsigned DefaultFilterServiceID = 0x80;
    static const unsigned DefaultAnonMsgMask = 0xFF;
    static const unsigned DefaultAnonMsgID = 0x0;

    typedef uavcan::Multiset<CanFilterConfig> MultisetConfigContainer;

    // Merges two filter configs into a single config (defined out-of-line).
    static CanFilterConfig mergeFilters(CanFilterConfig& a_, CanFilterConfig& b_);
    // Number of set bits in n_ (helper for the merge algorithm; defined out-of-line).
    static uint8_t countBits(uint32_t n_);
    // Effective number of HW filters: filters_number_ override, or the driver-reported count (defined out-of-line).
    uint16_t getNumFilters() const;

    /**
     * Fills multiset_configs_ so that it can subsequently be processed with mergeConfigurations().
     */
    int16_t loadInputConfiguration(AnonymousMessages load_mode);

    /**
     * This method merges several listeners' filter configurations by a predetermined algorithm
     * if the number of available hardware acceptance filters is less than the number of listeners.
     */
    int16_t mergeConfigurations();

    INode& node_;               ///< Node reference is needed for access to ICanDriver and Dispatcher
    MultisetConfigContainer multiset_configs_;
    uint16_t filters_number_;   ///< Zero means "query the CAN driver" — see the constructor.

public:
    /**
     * @param node Libuavcan node whose subscribers/servers/etc will be used to configure the filters.
     *
     * @param filters_number Allows to override the maximum number of hardware filters to use.
     *                       If set to zero (which is default), the class will obtain the number of available
     *                       filters from the CAN driver via @ref ICanIface::getNumFilters().
     */
    explicit CanAcceptanceFilterConfigurator(INode& node, uint16_t filters_number = 0)
        : node_(node)
        , multiset_configs_(node.getAllocator())
        , filters_number_(filters_number)
    { }

    /**
     * This method invokes loadInputConfiguration() and mergeConfigurations() in sequence
     * in order to compute optimal filter configurations for the current hardware.
     *
     * It can only be invoked when all of the subscriber and server objects are initialized.
     * If new subscriber or server objects are added later, the filters will have to be reconfigured again.
     *
     * @param mode Either: AcceptAnonymousMessages - the filters will accept all anonymous messages (this is default)
     *                     IgnoreAnonymousMessages - anonymous messages will be ignored
     * @return 0 = success, negative for error.
     */
    int computeConfiguration(AnonymousMessages mode = AcceptAnonymousMessages);

    /**
     * Add an additional filter configuration.
     * This method must not be invoked after @ref computeConfiguration().
     * @return 0 = success, negative for error.
     */
    int addFilterConfig(const CanFilterConfig& config);

    /**
     * This method loads the configuration computed with mergeConfigurations() or explicitly added by addFilterConfig()
     * to the CAN driver. Must be called after computeConfiguration() and addFilterConfig().
     * @return 0 = success, negative for error.
     */
    int applyConfiguration();

    /**
     * Returns the configuration computed with mergeConfigurations() or added by addFilterConfig().
     * If mergeConfigurations() or addFilterConfig() have not been called yet, an empty configuration will be returned.
     */
    const MultisetConfigContainer& getConfiguration() const
    {
        return multiset_configs_;
    }
};
/**
 * Shortcut for @ref CanAcceptanceFilterConfigurator: computes the acceptance filter
 * configuration for the given node and immediately applies it to the CAN driver,
 * in one step. It implements only the most common use case; if you have special
 * requirements, use @ref CanAcceptanceFilterConfigurator directly.
 *
 * @param node Refer to @ref CanAcceptanceFilterConfigurator constructor for explanation.
 * @param mode Refer to @ref CanAcceptanceFilterConfigurator::computeConfiguration() for explanation.
 * @return non-negative on success, negative error code on error.
 */
static inline int configureCanAcceptanceFilters(INode& node,
                                                CanAcceptanceFilterConfigurator::AnonymousMessages mode
                                                    = CanAcceptanceFilterConfigurator::AcceptAnonymousMessages)
{
    CanAcceptanceFilterConfigurator configurator(node);

    const int result = configurator.computeConfiguration(mode);

    // Apply only if computation succeeded; otherwise propagate the error code unchanged.
    return (result < 0) ? result : configurator.applyConfiguration();
}
}
#endif
@@ -0,0 +1,186 @@
/*
* CAN bus IO logic.
* Copyright (C) 2014 Pavel Kirienko <pavel.kirienko@gmail.com>
*/
#ifndef UAVCAN_TRANSPORT_CAN_IO_HPP_INCLUDED
#define UAVCAN_TRANSPORT_CAN_IO_HPP_INCLUDED
#include <cassert>
#include <uavcan/error.hpp>
#include <uavcan/std.hpp>
#include <uavcan/util/linked_list.hpp>
#include <uavcan/dynamic_memory.hpp>
#include <uavcan/build_config.hpp>
#include <uavcan/util/templates.hpp>
#include <uavcan/util/lazy_constructor.hpp>
#include <uavcan/driver/can.hpp>
#include <uavcan/driver/system_clock.hpp>
#include <uavcan/time.hpp>
namespace uavcan
{
/**
 * A CAN frame extended with RX metadata: timestamps and the index of the interface it was received on.
 */
struct UAVCAN_EXPORT CanRxFrame : public CanFrame
{
    MonotonicTime ts_mono;  ///< Monotonic RX timestamp
    UtcTime ts_utc;         ///< UTC RX timestamp; presumably zero when the platform driver cannot provide it — TODO confirm
    uint8_t iface_index;    ///< Index of the interface this frame arrived on

    CanRxFrame()
        : iface_index(0)
    { }

#if UAVCAN_TOSTRING
    std::string toString(StringRepresentation mode = StrTight) const;
#endif
};
/**
 * Prioritized TX queue for frames that could not be transmitted immediately.
 * Entries are dynamically allocated from a quota-limited pool and carry a TX deadline,
 * a QoS class and I/O flags.
 */
class UAVCAN_EXPORT CanTxQueue : Noncopyable
{
public:
    /// QoS class of a queued frame; ordering semantics are implemented out-of-line in qosHigherThan()/qosLowerThan().
    enum Qos { Volatile, Persistent };

    /**
     * One queued frame. Entries are pool-allocated and linked into the queue list.
     */
    struct Entry : public LinkedListNode<Entry>  // Not required to be packed - fits the block in any case
    {
        MonotonicTime deadline;     ///< Frame is dropped once the clock passes this point — see isExpired()
        CanFrame frame;
        uint8_t qos;                ///< Stored as uint8_t; always one of the Qos enumerators (asserted in ctor)
        CanIOFlags flags;

        Entry(const CanFrame& arg_frame, MonotonicTime arg_deadline, Qos arg_qos, CanIOFlags arg_flags)
            : deadline(arg_deadline)
            , frame(arg_frame)
            , qos(uint8_t(arg_qos))
            , flags(arg_flags)
        {
            UAVCAN_ASSERT((qos == Volatile) || (qos == Persistent));
            IsDynamicallyAllocatable<Entry>::check();   // Compile-time check: Entry fits a pool block
        }

        static void destroy(Entry*& obj, IPoolAllocator& allocator);

        bool isExpired(MonotonicTime timestamp) const { return timestamp > deadline; }

        bool qosHigherThan(const CanFrame& rhs_frame, Qos rhs_qos) const;
        bool qosLowerThan(const CanFrame& rhs_frame, Qos rhs_qos) const;
        bool qosHigherThan(const Entry& rhs) const { return qosHigherThan(rhs.frame, Qos(rhs.qos)); }
        bool qosLowerThan(const Entry& rhs) const  { return qosLowerThan(rhs.frame, Qos(rhs.qos)); }

#if UAVCAN_TOSTRING
        std::string toString() const;
#endif
    };

private:
    /// Insertion predicate: true while the new frame outranks the examined entry's frame (CAN ID priority).
    class PriorityInsertionComparator
    {
        const CanFrame& frm_;
    public:
        explicit PriorityInsertionComparator(const CanFrame& frm) : frm_(frm) { }
        bool operator()(const Entry* entry)
        {
            UAVCAN_ASSERT(entry);
            return frm_.priorityHigherThan(entry->frame);
        }
    };

    LinkedListRoot<Entry> queue_;       ///< Entries kept in CAN priority order (see PriorityInsertionComparator)
    LimitedPoolAllocator allocator_;    ///< Pool allocator with a per-queue quota
    ISystemClock& sysclock_;
    uint32_t rejected_frames_cnt_;      ///< Number of frames that could not be queued/sent

    void registerRejectedFrame();

public:
    CanTxQueue(IPoolAllocator& allocator, ISystemClock& sysclock, std::size_t allocator_quota)
        : allocator_(allocator, allocator_quota)
        , sysclock_(sysclock)
        , rejected_frames_cnt_(0)
    { }

    ~CanTxQueue();

    void push(const CanFrame& frame, MonotonicTime tx_deadline, Qos qos, CanIOFlags flags);

    Entry* peek();                  // Modifier
    void remove(Entry*& entry);

    const CanFrame* getTopPriorityPendingFrame() const;

    /// The 'or equal' condition is necessary to avoid frame reordering.
    bool topPriorityHigherOrEqual(const CanFrame& rhs_frame) const;

    uint32_t getRejectedFrameCount() const { return rejected_frames_cnt_; }

    bool isEmpty() const { return queue_.isEmpty(); }
};
/**
 * Per-interface performance counters, retrievable via CanIOManager::getIfacePerfCounters().
 */
struct UAVCAN_EXPORT CanIfacePerfCounters
{
    uint64_t frames_tx;     ///< Total frames transmitted on this interface
    uint64_t frames_rx;     ///< Total frames received on this interface
    uint64_t errors;        ///< Total error count (exact composition defined out-of-line — TODO confirm)

    CanIfacePerfCounters()
        : frames_tx(0)
        , frames_rx(0)
        , errors(0)
    { }
};
/**
 * Multiplexes CAN I/O across all interfaces of the underlying driver.
 * Maintains one lazily-constructed TX queue and one frame-counter set per interface.
 */
class UAVCAN_EXPORT CanIOManager : Noncopyable
{
    /// Internal TX/RX frame counters kept per interface (exposed via getIfacePerfCounters()).
    struct IfaceFrameCounters
    {
        uint64_t frames_tx;
        uint64_t frames_rx;

        IfaceFrameCounters()
            : frames_tx(0)
            , frames_rx(0)
        { }
    };

    ICanDriver& driver_;
    ISystemClock& sysclock_;

    LazyConstructor<CanTxQueue> tx_queues_[MaxCanIfaces];   ///< Per-interface TX queues, constructed on demand
    IfaceFrameCounters counters_[MaxCanIfaces];

    const uint8_t num_ifaces_;

    // Internal helpers; defined out-of-line.
    int sendToIface(uint8_t iface_index, const CanFrame& frame, MonotonicTime tx_deadline, CanIOFlags flags);
    int sendFromTxQueue(uint8_t iface_index);
    int callSelect(CanSelectMasks& inout_masks, const CanFrame* (& pending_tx)[MaxCanIfaces],
                   MonotonicTime blocking_deadline);

public:
    CanIOManager(ICanDriver& driver, IPoolAllocator& allocator, ISystemClock& sysclock,
                 std::size_t mem_blocks_per_iface = 0);  // 0 presumably selects a default quota — TODO confirm

    uint8_t getNumIfaces() const { return num_ifaces_; }

    CanIfacePerfCounters getIfacePerfCounters(uint8_t iface_index) const;

    const ICanDriver& getCanDriver() const { return driver_; }
    ICanDriver& getCanDriver()             { return driver_; }

    /// Bitmask of interfaces that currently have frames pending in their TX queues.
    uint8_t makePendingTxMask() const;

    /**
     * Returns:
     *  0  - rejected/timedout/enqueued
     *  1+ - sent/received
     *  negative - failure
     */
    int send(const CanFrame& frame, MonotonicTime tx_deadline, MonotonicTime blocking_deadline,
             uint8_t iface_mask, CanTxQueue::Qos qos, CanIOFlags flags);
    int receive(CanRxFrame& out_frame, MonotonicTime blocking_deadline, CanIOFlags& out_flags);
};
}
#endif // UAVCAN_TRANSPORT_CAN_IO_HPP_INCLUDED
@@ -0,0 +1,80 @@
/*
* Copyright (C) 2014 Pavel Kirienko <pavel.kirienko@gmail.com>
*/
#ifndef UAVCAN_TRANSPORT_CRC_HPP_INCLUDED
#define UAVCAN_TRANSPORT_CRC_HPP_INCLUDED
#include <cassert>
#include <uavcan/std.hpp>
#include <uavcan/build_config.hpp>
namespace uavcan
{
/**
 * CRC-16-CCITT
 * Initial value: 0xFFFF
 * Poly: 0x1021
 * Reverse: no
 * Output xor: 0
 *
 * Reference check value, computed with the Python 'crcmod' package:
 *  import crcmod
 *  crc = crcmod.predefined.Crc('crc-ccitt-false')
 *  crc.update('123456789')
 *  crc.hexdigest()
 *  '29B1'
 */
class UAVCAN_EXPORT TransferCRC
{
#if !UAVCAN_TINY
    static const uint16_t Table[256];   ///< Precomputed lookup table (defined out-of-line)
#endif

    uint16_t value_;

public:
    enum { NumBytes = 2 };

    TransferCRC()
        : value_(0xFFFFU)
    { }

#if UAVCAN_TINY
    // Bitwise implementation: smaller code/data footprint, slower per byte.
    void add(uint8_t byte)
    {
        value_ ^= uint16_t(uint16_t(byte) << 8);
        for (uint8_t bit = 8; bit > 0; --bit)
        {
            if (value_ & 0x8000U)
            {
                value_ = uint16_t(uint16_t(value_ << 1) ^ 0x1021U);
            }
            else
            {
                value_ = uint16_t(value_ << 1);
            }
        }
    }
#else
    // Table-driven implementation: one lookup per byte.
    void add(uint8_t byte)
    {
        value_ = uint16_t(uint16_t((value_ << 8) ^ Table[uint16_t((value_ >> 8) ^ byte) & 0xFFU]) & 0xFFFFU);
    }
#endif

    void add(const uint8_t* bytes, unsigned len)
    {
        UAVCAN_ASSERT(bytes);
        while (len--)
        {
            add(*bytes++);
        }
    }

    uint16_t get() const { return value_; }
};
}
#endif // UAVCAN_TRANSPORT_CRC_HPP_INCLUDED
@@ -0,0 +1,242 @@
/*
* Copyright (C) 2014 Pavel Kirienko <pavel.kirienko@gmail.com>
*/
#ifndef UAVCAN_TRANSPORT_DISPATCHER_HPP_INCLUDED
#define UAVCAN_TRANSPORT_DISPATCHER_HPP_INCLUDED
#include <cassert>
#include <uavcan/error.hpp>
#include <uavcan/std.hpp>
#include <uavcan/build_config.hpp>
#include <uavcan/transport/perf_counter.hpp>
#include <uavcan/transport/transfer_listener.hpp>
#include <uavcan/transport/outgoing_transfer_registry.hpp>
#include <uavcan/transport/can_io.hpp>
#include <uavcan/util/linked_list.hpp>
namespace uavcan
{
class UAVCAN_EXPORT Dispatcher;
#if !UAVCAN_TINY
/**
 * Inherit this class to receive notifications about all TX CAN frames that were transmitted with the loopback flag.
 */
class UAVCAN_EXPORT LoopbackFrameListenerBase : public LinkedListNode<LoopbackFrameListenerBase>
{
    Dispatcher& dispatcher_;

protected:
    explicit LoopbackFrameListenerBase(Dispatcher& dispatcher)
        : dispatcher_(dispatcher)
    { }

    // Automatically unregisters on destruction so the registry never holds a dangling pointer.
    virtual ~LoopbackFrameListenerBase() { stopListening(); }

    // Registration with the dispatcher's loopback registry (defined out-of-line).
    void startListening();
    void stopListening();
    bool isListening() const;

    Dispatcher& getDispatcher() { return dispatcher_; }

public:
    /// Invoked for every loopback frame while listening.
    virtual void handleLoopbackFrame(const RxFrame& frame) = 0;
};
/**
 * Keeps the list of registered @ref LoopbackFrameListenerBase instances and
 * dispatches loopback frames to all of them.
 */
class UAVCAN_EXPORT LoopbackFrameListenerRegistry : Noncopyable
{
    LinkedListRoot<LoopbackFrameListenerBase> listeners_;

public:
    void add(LoopbackFrameListenerBase* listener);
    void remove(LoopbackFrameListenerBase* listener);
    bool doesExist(const LoopbackFrameListenerBase* listener) const;

    unsigned getNumListeners() const { return listeners_.getLength(); }

    /// Calls handleLoopbackFrame() on every registered listener.
    void invokeListeners(RxFrame& frame);
};
/**
 * Implement this interface to receive notifications about all incoming CAN frames, including loopback.
 */
class UAVCAN_EXPORT IRxFrameListener
{
public:
    virtual ~IRxFrameListener() { }

    /**
     * Make sure to filter out loopback frames if they are not wanted.
     */
    virtual void handleRxFrame(const CanRxFrame& frame, CanIOFlags flags) = 0;
};
#endif
/**
 * This class performs low-level CAN frame routing: it owns the CAN I/O manager and routes
 * received frames to the registered message / service-request / service-response listeners.
 */
class UAVCAN_EXPORT Dispatcher : Noncopyable
{
    CanIOManager canio_;
    ISystemClock& sysclock_;
    OutgoingTransferRegistry outgoing_transfer_reg_;
    TransferPerfCounter perf_;

    /// Linked list of transfer listeners, kept ordered by Data Type ID on insertion.
    class ListenerRegistry
    {
        LinkedListRoot<TransferListener> list_;

        /// Insertion predicate: true while the new listener's DTID is greater than the examined one's.
        class DataTypeIDInsertionComparator
        {
            const DataTypeID id_;
        public:
            explicit DataTypeIDInsertionComparator(DataTypeID id) : id_(id) { }
            bool operator()(const TransferListener* listener) const
            {
                UAVCAN_ASSERT(listener);
                return id_ > listener->getDataTypeDescriptor().getID();
            }
        };

    public:
        /// UniqueListener allows at most one listener per data type; ManyListeners allows several.
        enum Mode { UniqueListener, ManyListeners };

        bool add(TransferListener* listener, Mode mode);
        void remove(TransferListener* listener);
        bool exists(DataTypeID dtid) const;
        void cleanup(MonotonicTime ts);
        void handleFrame(const RxFrame& frame);

        unsigned getNumEntries() const { return list_.getLength(); }

        const LinkedListRoot<TransferListener>& getList() const { return list_; }
    };

    ListenerRegistry lmsg_;         ///< Message listeners
    ListenerRegistry lsrv_req_;     ///< Service request listeners
    ListenerRegistry lsrv_resp_;    ///< Service response listeners

#if !UAVCAN_TINY
    LoopbackFrameListenerRegistry loopback_listeners_;
    IRxFrameListener* rx_listener_;
#endif

    NodeID self_node_id_;
    bool self_node_id_is_set_;      ///< Enforces the "Node ID can be set only once" rule — see setNodeID()

    void handleFrame(const CanRxFrame& can_frame);
    void handleLoopbackFrame(const CanRxFrame& can_frame);
    void notifyRxFrameListener(const CanRxFrame& can_frame, CanIOFlags flags);

public:
    Dispatcher(ICanDriver& driver, IPoolAllocator& allocator, ISystemClock& sysclock)
        : canio_(driver, allocator, sysclock)
        , sysclock_(sysclock)
        , outgoing_transfer_reg_(allocator)
#if !UAVCAN_TINY
        , rx_listener_(UAVCAN_NULLPTR)
#endif
        , self_node_id_(NodeID::Broadcast)  // Default
        , self_node_id_is_set_(false)
    { }

    /**
     * This version returns strictly when the deadline is reached.
     */
    int spin(MonotonicTime deadline);

    /**
     * This version does not return until all available frames are processed.
     */
    int spinOnce();

    /**
     * Refer to CanIOManager::send() for the parameter description
     */
    int send(const Frame& frame, MonotonicTime tx_deadline, MonotonicTime blocking_deadline, CanTxQueue::Qos qos,
             CanIOFlags flags, uint8_t iface_mask);

    void cleanup(MonotonicTime ts);

    bool registerMessageListener(TransferListener* listener);
    bool registerServiceRequestListener(TransferListener* listener);
    bool registerServiceResponseListener(TransferListener* listener);

    void unregisterMessageListener(TransferListener* listener);
    void unregisterServiceRequestListener(TransferListener* listener);
    void unregisterServiceResponseListener(TransferListener* listener);

    bool hasSubscriber(DataTypeID dtid) const;
    bool hasPublisher(DataTypeID dtid) const;
    bool hasServer(DataTypeID dtid) const;

    unsigned getNumMessageListeners()         const { return lmsg_.getNumEntries(); }
    unsigned getNumServiceRequestListeners()  const { return lsrv_req_.getNumEntries(); }
    unsigned getNumServiceResponseListeners() const { return lsrv_resp_.getNumEntries(); }

    /**
     * These methods can be used to retrieve lists of messages, service requests and service responses the
     * dispatcher is currently listening to.
     * Note that the list of service response listeners is very volatile, because a response listener will be
     * removed from this list as soon as the corresponding service call is complete.
     * @{
     */
    const LinkedListRoot<TransferListener>& getListOfMessageListeners() const
    {
        return lmsg_.getList();
    }
    const LinkedListRoot<TransferListener>& getListOfServiceRequestListeners() const
    {
        return lsrv_req_.getList();
    }
    const LinkedListRoot<TransferListener>& getListOfServiceResponseListeners() const
    {
        return lsrv_resp_.getList();
    }
    /**
     * @}
     */

    OutgoingTransferRegistry& getOutgoingTransferRegistry() { return outgoing_transfer_reg_; }

#if !UAVCAN_TINY
    LoopbackFrameListenerRegistry& getLoopbackFrameListenerRegistry() { return loopback_listeners_; }

    IRxFrameListener* getRxFrameListener() const { return rx_listener_; }
    void removeRxFrameListener() { rx_listener_ = UAVCAN_NULLPTR; }
    void installRxFrameListener(IRxFrameListener* listener)
    {
        UAVCAN_ASSERT(listener != UAVCAN_NULLPTR);
        rx_listener_ = listener;
    }
#endif

    /**
     * Node ID can be set only once.
     * Non-unicast Node ID puts the node into passive mode.
     */
    NodeID getNodeID() const { return self_node_id_; }
    bool setNodeID(NodeID nid);

    /**
     * Refer to the specs to learn more about passive mode.
     */
    bool isPassiveMode() const { return !getNodeID().isUnicast(); }

    const ISystemClock& getSystemClock() const { return sysclock_; }
    ISystemClock& getSystemClock()             { return sysclock_; }

    const CanIOManager& getCanIOManager() const { return canio_; }
    CanIOManager& getCanIOManager()             { return canio_; }

    const TransferPerfCounter& getTransferPerfCounter() const { return perf_; }
    TransferPerfCounter& getTransferPerfCounter()             { return perf_; }
};
}
#endif // UAVCAN_TRANSPORT_DISPATCHER_HPP_INCLUDED
@@ -0,0 +1,144 @@
/*
* Copyright (C) 2014 Pavel Kirienko <pavel.kirienko@gmail.com>
*/
#ifndef UAVCAN_TRANSPORT_FRAME_HPP_INCLUDED
#define UAVCAN_TRANSPORT_FRAME_HPP_INCLUDED
#include <cassert>
#include <uavcan/transport/transfer.hpp>
#include <uavcan/transport/can_io.hpp>
#include <uavcan/build_config.hpp>
#include <uavcan/data_type.hpp>
namespace uavcan
{
/**
 * Decoded representation of a single UAVCAN transport frame:
 * transfer metadata plus up to PayloadCapacity bytes of payload.
 * Conversion to/from raw CanFrame is done via compile()/parse().
 */
class UAVCAN_EXPORT Frame
{
    enum { PayloadCapacity = 7 };   // Will be redefined when CAN FD is available

    uint8_t payload_[PayloadCapacity];
    TransferPriority transfer_priority_;
    TransferType transfer_type_;
    DataTypeID data_type_id_;
    uint_fast8_t payload_len_;
    NodeID src_node_id_;
    NodeID dst_node_id_;                ///< Broadcast for message broadcast transfers
    TransferID transfer_id_;
    bool start_of_transfer_;
    bool end_of_transfer_;
    bool toggle_;

public:
    Frame() :
        transfer_type_(TransferType(NumTransferTypes)), // Invalid value
        payload_len_(0),
        start_of_transfer_(false),
        end_of_transfer_(false),
        toggle_(false)
    { }

    Frame(DataTypeID data_type_id,
          TransferType transfer_type,
          NodeID src_node_id,
          NodeID dst_node_id,
          TransferID transfer_id) :
        transfer_priority_(TransferPriority::Default),
        transfer_type_(transfer_type),
        data_type_id_(data_type_id),
        payload_len_(0),
        src_node_id_(src_node_id),
        dst_node_id_(dst_node_id),
        transfer_id_(transfer_id),
        start_of_transfer_(false),
        end_of_transfer_(false),
        toggle_(false)
    {
        // Broadcast transfers must target the broadcast node ID, and vice versa.
        UAVCAN_ASSERT((transfer_type == TransferTypeMessageBroadcast) == dst_node_id.isBroadcast());
        UAVCAN_ASSERT(data_type_id.isValidForDataTypeKind(getDataTypeKindForTransferType(transfer_type)));
        UAVCAN_ASSERT(src_node_id.isUnicast() ? (src_node_id != dst_node_id) : true);
    }

    void setPriority(TransferPriority priority) { transfer_priority_ = priority; }
    TransferPriority getPriority() const        { return transfer_priority_; }

    /**
     * Max payload length depends on the transfer type and frame index.
     */
    uint8_t getPayloadCapacity() const { return PayloadCapacity; }
    uint8_t setPayload(const uint8_t* data, unsigned len);

    unsigned getPayloadLen() const { return payload_len_; }
    const uint8_t* getPayloadPtr() const { return payload_; }

    TransferType getTransferType() const { return transfer_type_; }
    DataTypeID getDataTypeID()     const { return data_type_id_; }
    NodeID getSrcNodeID()          const { return src_node_id_; }
    NodeID getDstNodeID()          const { return dst_node_id_; }
    TransferID getTransferID()     const { return transfer_id_; }

    void setStartOfTransfer(bool x) { start_of_transfer_ = x; }
    void setEndOfTransfer(bool x)   { end_of_transfer_ = x; }
    bool isStartOfTransfer() const  { return start_of_transfer_; }
    bool isEndOfTransfer() const    { return end_of_transfer_; }

    void flipToggle()       { toggle_ = !toggle_; }
    bool getToggle() const  { return toggle_; }

    // Raw-frame conversion and validation; defined out-of-line.
    bool parse(const CanFrame& can_frame);
    bool compile(CanFrame& can_frame) const;

    bool isValid() const;

    bool operator!=(const Frame& rhs) const { return !operator==(rhs); }
    bool operator==(const Frame& rhs) const;

#if UAVCAN_TOSTRING
    std::string toString() const;
#endif
};
/**
 * A decoded transport frame extended with RX metadata: timestamps and the receiving interface index.
 */
class UAVCAN_EXPORT RxFrame : public Frame
{
    MonotonicTime ts_mono_;
    UtcTime ts_utc_;
    uint8_t iface_index_;

public:
    RxFrame()
        : iface_index_(0)
    { }

    RxFrame(const Frame& frame, MonotonicTime ts_mono, UtcTime ts_utc, uint8_t iface_index)
        : ts_mono_(ts_mono)
        , ts_utc_(ts_utc)
        , iface_index_(iface_index)
    {
        // Copy the decoded frame fields into the base sub-object.
        *static_cast<Frame*>(this) = frame;
    }

    bool parse(const CanRxFrame& can_frame);

    /**
     * Can't be zero.
     */
    MonotonicTime getMonotonicTimestamp() const { return ts_mono_; }

    /**
     * Can be zero if not supported by the platform driver.
     */
    UtcTime getUtcTimestamp() const { return ts_utc_; }

    uint8_t getIfaceIndex() const { return iface_index_; }

#if UAVCAN_TOSTRING
    std::string toString() const;
#endif
};
}
#endif // UAVCAN_TRANSPORT_FRAME_HPP_INCLUDED
@@ -0,0 +1,130 @@
/*
* Copyright (C) 2014 Pavel Kirienko <pavel.kirienko@gmail.com>
*/
#ifndef UAVCAN_TRANSPORT_OUTGOING_TRANSFER_REGISTRY_HPP_INCLUDED
#define UAVCAN_TRANSPORT_OUTGOING_TRANSFER_REGISTRY_HPP_INCLUDED
#include <cassert>
#include <uavcan/std.hpp>
#include <uavcan/build_config.hpp>
#include <uavcan/util/map.hpp>
#include <uavcan/debug.hpp>
#include <uavcan/transport/transfer.hpp>
#include <uavcan/time.hpp>
namespace uavcan
{
/**
 * Key identifying one outgoing transfer sender:
 * (data type ID, transfer type, destination node ID).
 */
class UAVCAN_EXPORT OutgoingTransferRegistryKey
{
    DataTypeID data_type_id_;
    uint8_t transfer_type_;         ///< Stored as uint8_t; 0xFF marks a default-constructed (invalid) key
    NodeID destination_node_id_;    ///< Not applicable for message broadcasting

public:
    OutgoingTransferRegistryKey()
        : transfer_type_(0xFF)
    { }

    OutgoingTransferRegistryKey(DataTypeID data_type_id, TransferType transfer_type, NodeID destination_node_id)
        : data_type_id_(data_type_id)
        , transfer_type_(transfer_type)
        , destination_node_id_(destination_node_id)
    {
        UAVCAN_ASSERT((transfer_type == TransferTypeMessageBroadcast) == destination_node_id.isBroadcast());
        /*
         * Service response transfers must use the same Transfer ID as matching service request transfer,
         * so this registry is not applicable for service response transfers at all.
         */
        UAVCAN_ASSERT(transfer_type != TransferTypeServiceResponse);
    }

    DataTypeID getDataTypeID() const     { return data_type_id_; }
    TransferType getTransferType() const { return TransferType(transfer_type_); }

    bool operator==(const OutgoingTransferRegistryKey& rhs) const
    {
        return
            (data_type_id_ == rhs.data_type_id_) &&
            (transfer_type_ == rhs.transfer_type_) &&
            (destination_node_id_ == rhs.destination_node_id_);
    }

#if UAVCAN_TOSTRING
    std::string toString() const;
#endif
};
/**
 * Outgoing transfer registry keeps track of Transfer ID values for all currently existing local transfer senders.
 * If a local transfer sender was inactive for a sufficiently long time, the outgoing transfer registry will
 * remove the respective Transfer ID tracking object.
 */
class UAVCAN_EXPORT OutgoingTransferRegistry : Noncopyable
{
    /// Map value: the tracked Transfer ID plus the deadline after which the entry is evicted.
    struct Value
    {
        MonotonicTime deadline;
        TransferID tid;
    };

    /// Map-removal predicate: matches entries whose deadline has passed (used by cleanup()).
    class DeadlineExpiredPredicate
    {
        const MonotonicTime ts_;

    public:
        explicit DeadlineExpiredPredicate(MonotonicTime ts)
            : ts_(ts)
        { }

        bool operator()(const OutgoingTransferRegistryKey& key, const Value& value) const
        {
            (void)key;
            UAVCAN_ASSERT(!value.deadline.isZero());
            const bool expired = value.deadline <= ts_;
            if (expired)
            {
                UAVCAN_TRACE("OutgoingTransferRegistry", "Expired %s tid=%i",
                             key.toString().c_str(), int(value.tid.get()));
            }
            return expired;
        }
    };

    /// Map-search predicate: matches entries with the given data type ID and transfer type (used by exists()).
    class ExistenceCheckingPredicate
    {
        const DataTypeID dtid_;
        const TransferType tt_;

    public:
        ExistenceCheckingPredicate(DataTypeID dtid, TransferType tt)
            : dtid_(dtid)
            , tt_(tt)
        { }

        bool operator()(const OutgoingTransferRegistryKey& key, const Value&) const
        {
            return dtid_ == key.getDataTypeID() && tt_ == key.getTransferType();
        }
    };

    Map<OutgoingTransferRegistryKey, Value> map_;

public:
    static const MonotonicDuration MinEntryLifetime;    ///< Defined out-of-line

    explicit OutgoingTransferRegistry(IPoolAllocator& allocator)
        : map_(allocator)
    { }

    /// Returns the Transfer ID object for the key, creating an entry if needed; extends the entry's deadline.
    TransferID* accessOrCreate(const OutgoingTransferRegistryKey& key, MonotonicTime new_deadline);

    bool exists(DataTypeID dtid, TransferType tt) const;

    /// Removes all entries whose deadline has passed.
    void cleanup(MonotonicTime ts);
};
}
#endif // UAVCAN_TRANSPORT_OUTGOING_TRANSFER_REGISTRY_HPP_INCLUDED
@@ -0,0 +1,72 @@
/*
* Copyright (C) 2014 Pavel Kirienko <pavel.kirienko@gmail.com>
*/
#ifndef UAVCAN_TRANSPORT_PERF_COUNTER_HPP_INCLUDED
#define UAVCAN_TRANSPORT_PERF_COUNTER_HPP_INCLUDED
#include <uavcan/std.hpp>
#include <uavcan/build_config.hpp>
#include <uavcan/util/templates.hpp>
namespace uavcan
{
#if UAVCAN_TINY
/// No-op stub used in UAVCAN_TINY builds: all mutators do nothing, all getters return zero.
class UAVCAN_EXPORT TransferPerfCounter : Noncopyable
{
public:
    void addTxTransfer() { }
    void addRxTransfer() { }
    void addError() { }
    void addErrors(unsigned) { }
    uint64_t getTxTransferCount() const { return 0; }
    uint64_t getRxTransferCount() const { return 0; }
    uint64_t getErrorCount() const      { return 0; }
};
#else
/**
 * The class is declared noncopyable for two reasons:
 * - to prevent accidental pass-by-value into a mutator
 * - to make the addresses of the counters fixed and exposable to the user of the library
 */
class UAVCAN_EXPORT TransferPerfCounter : Noncopyable
{
    uint64_t transfers_tx_;
    uint64_t transfers_rx_;
    uint64_t errors_;

public:
    TransferPerfCounter()
        : transfers_tx_(0)
        , transfers_rx_(0)
        , errors_(0)
    { }

    void addTxTransfer() { transfers_tx_++; }
    void addRxTransfer() { transfers_rx_++; }

    void addError() { errors_++; }
    void addErrors(unsigned errors)
    {
        errors_ += errors;
    }

    /**
     * Returned references are guaranteed to be valid as long as this instance of Node exists.
     * This is enforced by virtue of the class being Noncopyable.
     */
    const uint64_t& getTxTransferCount() const { return transfers_tx_; }
    const uint64_t& getRxTransferCount() const { return transfers_rx_; }
    const uint64_t& getErrorCount() const      { return errors_; }
};
#endif
}
#endif // UAVCAN_TRANSPORT_PERF_COUNTER_HPP_INCLUDED
@@ -0,0 +1,149 @@
/*
* Copyright (C) 2014 Pavel Kirienko <pavel.kirienko@gmail.com>
*/
#ifndef UAVCAN_TRANSPORT_TRANSFER_HPP_INCLUDED
#define UAVCAN_TRANSPORT_TRANSFER_HPP_INCLUDED
#include <cassert>
#include <uavcan/build_config.hpp>
#include <uavcan/util/templates.hpp>
#include <uavcan/std.hpp>
namespace uavcan
{
static const unsigned GuaranteedPayloadLenPerFrame = 7;     ///< Guaranteed for all transfers, all CAN standards

/// Kind of a UAVCAN transfer. Numeric values presumably match the on-wire encoding — TODO confirm against spec.
enum TransferType
{
    TransferTypeServiceResponse  = 0,
    TransferTypeServiceRequest   = 1,
    TransferTypeMessageBroadcast = 2
};

static const uint8_t NumTransferTypes = 3;  ///< Also used as an "invalid" sentinel, e.g. in Frame's default ctor
/**
 * Transfer priority, a 5-bit value. Named presets (Default, Lowest, etc.) are defined out-of-line.
 */
class UAVCAN_EXPORT TransferPriority
{
    uint8_t value_;

public:
    static const uint8_t BitLen = 5U;
    static const uint8_t NumericallyMax = (1U << BitLen) - 1;
    static const uint8_t NumericallyMin = 0;

    /// This priority is used by default
    static const TransferPriority Default;
    static const TransferPriority MiddleLower;
    static const TransferPriority OneHigherThanLowest;
    static const TransferPriority OneLowerThanHighest;
    static const TransferPriority Lowest;

    TransferPriority() : value_(0xFF) { }   // Default-constructed value is invalid (out of the 5-bit range)

    TransferPriority(uint8_t value)         // Implicit
        : value_(value)
    {
        UAVCAN_ASSERT(isValid());
    }

    /**
     * Compile-time conversion from a percentage to a priority value:
     * Percent=100 maps to numeric 0, Percent=0 maps to NumericallyMax.
     */
    template <uint8_t Percent>
    static TransferPriority fromPercent()
    {
        StaticAssert<(Percent <= 100)>::check();
        enum { Result = ((100U - Percent) * NumericallyMax) / 100U };
        StaticAssert<(Result <= NumericallyMax)>::check();
        StaticAssert<(Result >= NumericallyMin)>::check();
        return TransferPriority(Result);
    }

    uint8_t get() const { return value_; }

    bool isValid() const { return value_ < (1U << BitLen); }

    bool operator!=(TransferPriority rhs) const { return value_ != rhs.value_; }
    bool operator==(TransferPriority rhs) const { return value_ == rhs.value_; }
};
/**
 * Transfer ID, a 5-bit value that wraps around via increment().
 */
class UAVCAN_EXPORT TransferID
{
    uint8_t value_;

public:
    static const uint8_t BitLen = 5U;
    static const uint8_t Max = (1U << BitLen) - 1U;
    static const uint8_t Half = (1U << BitLen) / 2U;

    TransferID()
        : value_(0)
    { }

    TransferID(uint8_t value)   // implicit
        : value_(value)
    {
        value_ &= Max;          // Truncate to the 5-bit range
        UAVCAN_ASSERT(value == value_);
    }

    bool operator!=(TransferID rhs) const { return !operator==(rhs); }
    bool operator==(TransferID rhs) const { return get() == rhs.get(); }

    /// Wrapping increment modulo (Max + 1).
    void increment()
    {
        value_ = (value_ + 1) & Max;
    }

    uint8_t get() const
    {
        UAVCAN_ASSERT(value_ <= Max);
        return value_;
    }

    /**
     * Amount of increment() calls to reach rhs value.
     */
    int computeForwardDistance(TransferID rhs) const;
};
/**
 * Node ID, a 7-bit value. Zero means broadcast; values above Max (default-constructed 0xFF) are invalid.
 */
class UAVCAN_EXPORT NodeID
{
    static const uint8_t ValueBroadcast = 0;
    static const uint8_t ValueInvalid = 0xFF;
    uint8_t value_;

public:
    static const uint8_t BitLen = 7U;
    static const uint8_t Max = (1U << BitLen) - 1U;
    static const uint8_t MaxRecommendedForRegularNodes = Max - 2;
    static const NodeID Broadcast;  ///< Defined out-of-line

    NodeID() : value_(ValueInvalid) { }

    NodeID(uint8_t value)   // Implicit
        : value_(value)
    {
        UAVCAN_ASSERT(isValid());
    }

    uint8_t get() const { return value_; }

    bool isValid()     const { return value_ <= Max; }
    bool isBroadcast() const { return value_ == ValueBroadcast; }
    bool isUnicast()   const { return (value_ <= Max) && (value_ != ValueBroadcast); }

    bool operator!=(NodeID rhs) const { return !operator==(rhs); }
    bool operator==(NodeID rhs) const { return value_ == rhs.value_; }

    bool operator<(NodeID rhs)  const { return value_ < rhs.value_; }
    bool operator>(NodeID rhs)  const { return value_ > rhs.value_; }
    bool operator<=(NodeID rhs) const { return value_ <= rhs.value_; }
    bool operator>=(NodeID rhs) const { return value_ >= rhs.value_; }
};
}
#endif // UAVCAN_TRANSPORT_TRANSFER_HPP_INCLUDED
@@ -0,0 +1,199 @@
/*
* Copyright (C) 2014 Pavel Kirienko <pavel.kirienko@gmail.com>
*/
#ifndef UAVCAN_TRANSPORT_TRANSFER_BUFFER_HPP_INCLUDED
#define UAVCAN_TRANSPORT_TRANSFER_BUFFER_HPP_INCLUDED
#include <uavcan/std.hpp>
#include <uavcan/error.hpp>
#include <uavcan/transport/frame.hpp>
#include <uavcan/transport/abstract_transfer_buffer.hpp>
#include <uavcan/util/linked_list.hpp>
#include <uavcan/dynamic_memory.hpp>
#include <uavcan/build_config.hpp>
#include <uavcan/debug.hpp>
namespace uavcan
{
/**
 * Standalone static buffer: ITransferBuffer backed by caller-provided storage of fixed size.
 */
class StaticTransferBufferImpl : public ITransferBuffer
{
    uint8_t* const data_;
    const uint16_t size_;
    uint16_t max_write_pos_;    ///< Highest offset written so far (high-water mark)

public:
    StaticTransferBufferImpl(uint8_t* buf, uint16_t buf_size) :
        data_(buf),
        size_(buf_size),
        max_write_pos_(0)
    { }

    virtual int read(unsigned offset, uint8_t* data, unsigned len) const override;
    virtual int write(unsigned offset, const uint8_t* data, unsigned len) override;

    void reset();

    uint16_t getSize() const { return size_; }

    uint8_t* getRawPtr()             { return data_; }
    const uint8_t* getRawPtr() const { return data_; }

    uint16_t getMaxWritePos() const        { return max_write_pos_; }
    void setMaxWritePos(uint16_t value)    { max_write_pos_ = value; }
};
/**
 * Convenience wrapper that embeds the storage array of the requested Size directly in the object.
 */
template <uint16_t Size>
class UAVCAN_EXPORT StaticTransferBuffer : public StaticTransferBufferImpl
{
    uint8_t buffer_[Size];

public:
    StaticTransferBuffer() : StaticTransferBufferImpl(buffer_, Size)
    {
        StaticAssert<(Size > 0)>::check();  // Zero-sized buffer makes no sense and would be an array of size 0
    }
};
/**
 * Internal for TransferBufferManager: identifies a buffer by (source node ID, transfer type).
 * A default-constructed key (invalid node ID) represents "empty".
 */
class UAVCAN_EXPORT TransferBufferManagerKey
{
    NodeID node_id_;
    uint8_t transfer_type_;

public:
    TransferBufferManagerKey()
        : transfer_type_(TransferType(0))
    {
        UAVCAN_ASSERT(isEmpty());
    }

    TransferBufferManagerKey(NodeID node_id, TransferType ttype)
        : node_id_(node_id)
        , transfer_type_(ttype)
    {
        UAVCAN_ASSERT(!isEmpty());
    }

    bool operator==(const TransferBufferManagerKey& rhs) const
    {
        return node_id_ == rhs.node_id_ && transfer_type_ == rhs.transfer_type_;
    }

    // Empty means the node ID is invalid (default-constructed).
    bool isEmpty() const { return !node_id_.isValid(); }

    NodeID getNodeID() const             { return node_id_; }
    TransferType getTransferType() const { return TransferType(transfer_type_); }

#if UAVCAN_TOSTRING
    std::string toString() const;
#endif
};
/**
 * Resizable gather/scatter storage.
 * reset() call releases all memory blocks.
 * Supports unordered write operations - from higher to lower offsets
 */
class UAVCAN_EXPORT TransferBufferManagerEntry : public ITransferBuffer
                                               , public LinkedListNode<TransferBufferManagerEntry>
{
    /// One pool-allocated chunk of buffer storage; data size is whatever is left of the pool block after the link.
    struct Block : LinkedListNode<Block>
    {
        enum { Size = MemPoolBlockSize - sizeof(LinkedListNode<Block>) };
        uint8_t data[static_cast<unsigned>(Size)];

        static Block* instantiate(IPoolAllocator& allocator);
        static void destroy(Block*& obj, IPoolAllocator& allocator);

        void read(uint8_t*& outptr, unsigned target_offset,
                  unsigned& total_offset, unsigned& left_to_read);
        void write(const uint8_t*& inptr, unsigned target_offset,
                   unsigned& total_offset, unsigned& left_to_write);
    };

    IPoolAllocator& allocator_;
    LinkedListRoot<Block> blocks_;          // Blocks are ordered from lower to higher buffer offset
    uint16_t max_write_pos_;                ///< Highest offset written so far (high-water mark)
    const uint16_t max_size_;
    TransferBufferManagerKey key_;          ///< Default-constructed (empty) until reset(key) assigns one

public:
    TransferBufferManagerEntry(IPoolAllocator& allocator, uint16_t max_size) :
        allocator_(allocator),
        max_write_pos_(0),
        max_size_(max_size)
    {
        StaticAssert<(Block::Size > 8)>::check();
        IsDynamicallyAllocatable<Block>::check();
        IsDynamicallyAllocatable<TransferBufferManagerEntry>::check();
    }

    virtual ~TransferBufferManagerEntry() { reset(); }  // Releases all blocks back to the pool

    static TransferBufferManagerEntry* instantiate(IPoolAllocator& allocator, uint16_t max_size);
    static void destroy(TransferBufferManagerEntry*& obj, IPoolAllocator& allocator);

    virtual int read(unsigned offset, uint8_t* data, unsigned len) const override;
    virtual int write(unsigned offset, const uint8_t* data, unsigned len) override;

    /// Releases all blocks and re-keys the entry (default argument makes it empty).
    void reset(const TransferBufferManagerKey& key = TransferBufferManagerKey());

    const TransferBufferManagerKey& getKey() const { return key_; }
    bool isEmpty() const { return key_.isEmpty(); }
};
/**
* Buffer manager implementation.
*/
class TransferBufferManager : public Noncopyable
{
    LinkedListRoot<TransferBufferManagerEntry> buffers_;   // Currently allocated buffer entries
    IPoolAllocator& allocator_;                            // Pool backing the entries and their blocks
    const uint16_t max_buf_size_;                          // Size limit applied to each managed buffer

    // Linear search for the entry matching the key (defined out of line).
    TransferBufferManagerEntry* findFirst(const TransferBufferManagerKey& key);

public:
    TransferBufferManager(uint16_t max_buf_size, IPoolAllocator& allocator) :
        allocator_(allocator),
        max_buf_size_(max_buf_size)
    { }

    ~TransferBufferManager();

    // Buffer lifecycle (all defined out of line):
    // access() presumably returns an existing buffer for the key, create() a new
    // one, remove() releases it — confirm against the .cpp definitions.
    ITransferBuffer* access(const TransferBufferManagerKey& key);
    ITransferBuffer* create(const TransferBufferManagerKey& key);
    void remove(const TransferBufferManagerKey& key);

    bool isEmpty() const;
    unsigned getNumBuffers() const;
};
/**
* Convenience class.
*/
class UAVCAN_EXPORT TransferBufferAccessor
{
    TransferBufferManager& bufmgr_;
    const TransferBufferManagerKey key_;   // Must not be empty (asserted in the constructor)

public:
    TransferBufferAccessor(TransferBufferManager& bufmgr, TransferBufferManagerKey key) :
        bufmgr_(bufmgr),
        key_(key)
    {
        UAVCAN_ASSERT(!key.isEmpty());
    }

    // Thin forwarders that bind the stored key to the manager calls.
    ITransferBuffer* access() { return bufmgr_.access(key_); }
    ITransferBuffer* create() { return bufmgr_.create(key_); }
    void remove() { bufmgr_.remove(key_); }
};
}
#endif // UAVCAN_TRANSPORT_TRANSFER_BUFFER_HPP_INCLUDED
@@ -0,0 +1,194 @@
/*
* Copyright (C) 2014 Pavel Kirienko <pavel.kirienko@gmail.com>
*/
#ifndef UAVCAN_TRANSPORT_TRANSFER_LISTENER_HPP_INCLUDED
#define UAVCAN_TRANSPORT_TRANSFER_LISTENER_HPP_INCLUDED
#include <cassert>
#include <uavcan/error.hpp>
#include <uavcan/std.hpp>
#include <uavcan/transport/transfer_receiver.hpp>
#include <uavcan/transport/perf_counter.hpp>
#include <uavcan/util/linked_list.hpp>
#include <uavcan/util/map.hpp>
#include <uavcan/debug.hpp>
#include <uavcan/transport/crc.hpp>
#include <uavcan/data_type.hpp>
namespace uavcan
{
/**
* Container for received transfer.
*/
class UAVCAN_EXPORT IncomingTransfer : public ITransferBuffer
{
    // Transfer metadata captured at construction time.
    MonotonicTime ts_mono_;
    UtcTime ts_utc_;
    TransferPriority transfer_priority_;
    TransferType transfer_type_;
    TransferID transfer_id_;
    NodeID src_node_id_;
    uint8_t iface_index_;

    /// Writing into a received transfer is meaningless: this is a no-op that
    /// asserts in debug builds (defined out of line).
    virtual int write(unsigned offset, const uint8_t* data, unsigned len) override;

protected:
    IncomingTransfer(MonotonicTime ts_mono, UtcTime ts_utc, TransferPriority transfer_priority,
                     TransferType transfer_type, TransferID transfer_id, NodeID source_node_id,
                     uint8_t iface_index)
        : ts_mono_(ts_mono)
        , ts_utc_(ts_utc)
        , transfer_priority_(transfer_priority)
        , transfer_type_(transfer_type)
        , transfer_id_(transfer_id)
        , src_node_id_(source_node_id)
        , iface_index_(iface_index)
    { }

public:
    /**
     * Dispose the payload buffer. Further calls to read() will not be possible.
     */
    virtual void release() { }

    /**
     * Whether this is an anonymous transfer; the base implementation says no.
     */
    virtual bool isAnonymousTransfer() const { return false; }

    // Trivial accessors for the metadata above.
    MonotonicTime getMonotonicTimestamp() const { return ts_mono_; }
    UtcTime getUtcTimestamp() const { return ts_utc_; }
    TransferPriority getPriority() const { return transfer_priority_; }
    TransferType getTransferType() const { return transfer_type_; }
    TransferID getTransferID() const { return transfer_id_; }
    NodeID getSrcNodeID() const { return src_node_id_; }
    uint8_t getIfaceIndex() const { return iface_index_; }
};
/**
* Internal.
*/
class UAVCAN_EXPORT SingleFrameIncomingTransfer : public IncomingTransfer
{
    // NOTE(review): appears to reference the frame's payload rather than copy it
    // — confirm against the out-of-line constructor.
    const uint8_t* const payload_;
    const uint8_t payload_len_;

public:
    // Wraps a single received frame as a complete transfer (defined out of line).
    explicit SingleFrameIncomingTransfer(const RxFrame& frm);

    virtual int read(unsigned offset, uint8_t* data, unsigned len) const override;
    virtual bool isAnonymousTransfer() const override;
};
/**
* Internal.
*/
class UAVCAN_EXPORT MultiFrameIncomingTransfer : public IncomingTransfer, Noncopyable
{
    TransferBufferAccessor& buf_acc_;   // Accessor for the reassembly buffer that holds the payload

public:
    MultiFrameIncomingTransfer(MonotonicTime ts_mono, UtcTime ts_utc, const RxFrame& last_frame,
                               TransferBufferAccessor& tba);

    virtual int read(unsigned offset, uint8_t* data, unsigned len) const override;

    // Releasing removes the underlying buffer, so read() is impossible afterwards.
    virtual void release() override { buf_acc_.remove(); }
};
/**
* Internal, refer to the transport dispatcher class.
*/
class UAVCAN_EXPORT TransferListener : public LinkedListNode<TransferListener>
{
    const DataTypeDescriptor& data_type_;
    TransferBufferManager bufmgr_;                               // Reassembly buffers for multi-frame transfers
    Map<TransferBufferManagerKey, TransferReceiver> receivers_;  // Per-(node, transfer type) receiver state
    TransferPerfCounter& perf_;
    const TransferCRC crc_base_;       ///< Pre-initialized with data type hash, thus constant
    bool allow_anonymous_transfers_;

    // NOTE(review): name suggests this predicate selects receivers that timed out
    // by ts_ so cleanup() can purge them and their buffers — confirm in operator().
    class TimedOutReceiverPredicate
    {
        const MonotonicTime ts_;
        TransferBufferManager& parent_bufmgr_;

    public:
        TimedOutReceiverPredicate(MonotonicTime arg_ts, TransferBufferManager& arg_bufmgr)
            : ts_(arg_ts)
            , parent_bufmgr_(arg_bufmgr)
        { }

        bool operator()(const TransferBufferManagerKey& key, const TransferReceiver& value) const;
    };

    bool checkPayloadCrc(const uint16_t compare_with, const ITransferBuffer& tbb) const;

protected:
    void handleReception(TransferReceiver& receiver, const RxFrame& frame, TransferBufferAccessor& tba);
    void handleAnonymousTransferReception(const RxFrame& frame);

    // Invoked for every fully received transfer; implemented by derived classes.
    virtual void handleIncomingTransfer(IncomingTransfer& transfer) = 0;

public:
    TransferListener(TransferPerfCounter& perf, const DataTypeDescriptor& data_type,
                     uint16_t max_buffer_size, IPoolAllocator& allocator)
        : data_type_(data_type)
        , bufmgr_(max_buffer_size, allocator)
        , receivers_(allocator)
        , perf_(perf)
        , crc_base_(data_type.getSignature().toTransferCRC())
        , allow_anonymous_transfers_(false)
    { }

    virtual ~TransferListener();

    const DataTypeDescriptor& getDataTypeDescriptor() const { return data_type_; }

    /**
     * By default, anonymous transfers will be ignored.
     * This option enables reception of anonymous transfers.
     */
    void allowAnonymousTransfers() { allow_anonymous_transfers_ = true; }

    // Purges stale receiver state (defined out of line).
    void cleanup(MonotonicTime ts);

    virtual void handleFrame(const RxFrame& frame);
};
/**
* This class is used by transfer listener to decide if the frame should be accepted or ignored.
*/
class ITransferAcceptanceFilter
{
public:
    /**
     * If it returns false, the frame will be ignored, otherwise accepted.
     */
    virtual bool shouldAcceptFrame(const RxFrame& frame) const = 0;

    // Virtual: implementations may be deleted through this interface.
    virtual ~ITransferAcceptanceFilter() { }
};
/**
* This class should be derived by callers.
*/
class UAVCAN_EXPORT TransferListenerWithFilter : public TransferListener
{
    const ITransferAcceptanceFilter* filter_;   // UAVCAN_NULLPTR until a filter is installed

    virtual void handleFrame(const RxFrame& frame) override;

public:
    TransferListenerWithFilter(TransferPerfCounter& perf, const DataTypeDescriptor& data_type,
                               uint16_t max_buffer_size, IPoolAllocator& allocator)
        : TransferListener(perf, data_type, max_buffer_size, allocator)
        , filter_(UAVCAN_NULLPTR)
    { }

    /**
     * Installs (or replaces) the acceptance filter. Only the raw pointer is
     * stored, so the filter object must outlive this listener; ownership is
     * not transferred.
     */
    void installAcceptanceFilter(const ITransferAcceptanceFilter* acceptance_filter)
    {
        filter_ = acceptance_filter;
    }
};
}
#endif // UAVCAN_TRANSPORT_TRANSFER_LISTENER_HPP_INCLUDED
@@ -0,0 +1,96 @@
/*
* Copyright (C) 2014 Pavel Kirienko <pavel.kirienko@gmail.com>
*/
#ifndef UAVCAN_TRANSPORT_TRANSFER_RECEIVER_HPP_INCLUDED
#define UAVCAN_TRANSPORT_TRANSFER_RECEIVER_HPP_INCLUDED
#include <cstdlib>
#include <uavcan/build_config.hpp>
#include <uavcan/transport/frame.hpp>
#include <uavcan/transport/transfer_buffer.hpp>
namespace uavcan
{
class UAVCAN_EXPORT TransferReceiver
{
public:
    // Outcome of feeding one frame into the receiver state machine.
    enum ResultCode { ResultNotComplete, ResultComplete, ResultSingleFrame };

    // Transfer interval bounds and defaults, milliseconds.
    static const uint16_t MinTransferIntervalMSec = 1;
    static const uint16_t MaxTransferIntervalMSec = 0xFFFF;
    static const uint16_t DefaultTransferIntervalMSec = 1000;

    static const uint16_t DefaultTidTimeoutMSec = 1000;

    static MonotonicDuration getDefaultTransferInterval()
    {
        return MonotonicDuration::fromMSec(DefaultTransferIntervalMSec);
    }
    static MonotonicDuration getMinTransferInterval() { return MonotonicDuration::fromMSec(MinTransferIntervalMSec); }
    static MonotonicDuration getMaxTransferInterval() { return MonotonicDuration::fromMSec(MaxTransferIntervalMSec); }

private:
    enum { IfaceIndexNotSet = MaxCanIfaces };   // Sentinel: no interface selected yet
    enum { ErrorCntMask = 31 };                 // error_cnt_ is a 5-bit field
    enum { IfaceIndexMask = MaxCanIfaces };

    MonotonicTime prev_transfer_ts_;
    MonotonicTime this_transfer_ts_;
    UtcTime first_frame_ts_;
    uint16_t transfer_interval_msec_;
    uint16_t this_transfer_crc_;
    uint16_t buffer_write_pos_;          // Non-zero while a multi-frame transfer is in progress
    TransferID tid_; // 1 byte field
    // 1 byte aligned bitfields:
    uint8_t next_toggle_ : 1;
    uint8_t iface_index_ : 2;
    mutable uint8_t error_cnt_ : 5;      // mutable so registerError() const can bump it

    bool isInitialized() const { return iface_index_ != IfaceIndexNotSet; }
    bool isMidTransfer() const { return buffer_write_pos_ > 0; }

    MonotonicDuration getIfaceSwitchDelay() const;
    MonotonicDuration getTidTimeout() const;

    void registerError() const;

    void updateTransferTimings();
    void prepareForNextTransfer();

    bool validate(const RxFrame& frame) const;
    bool writePayload(const RxFrame& frame, ITransferBuffer& buf);
    ResultCode receive(const RxFrame& frame, TransferBufferAccessor& tba);

public:
    TransferReceiver() :
        transfer_interval_msec_(DefaultTransferIntervalMSec),
        this_transfer_crc_(0),
        buffer_write_pos_(0),
        next_toggle_(false),
        iface_index_(IfaceIndexNotSet),
        error_cnt_(0)
    { }

    bool isTimedOut(MonotonicTime current_ts) const;

    // Feeds one received frame into the reassembly state machine (defined out of line).
    ResultCode addFrame(const RxFrame& frame, TransferBufferAccessor& tba);

    // NOTE(review): "yield" suggests the error count is returned and reset —
    // confirm against the out-of-line definition.
    uint8_t yieldErrorCount();

    MonotonicTime getLastTransferTimestampMonotonic() const { return prev_transfer_ts_; }
    UtcTime getLastTransferTimestampUtc() const { return first_frame_ts_; }
    uint16_t getLastTransferCrc() const { return this_transfer_crc_; }

    MonotonicDuration getInterval() const { return MonotonicDuration::fromMSec(transfer_interval_msec_); }
};
}
#endif // UAVCAN_TRANSPORT_TRANSFER_RECEIVER_HPP_INCLUDED
@@ -0,0 +1,115 @@
/*
* Copyright (C) 2014 Pavel Kirienko <pavel.kirienko@gmail.com>
*/
#ifndef UAVCAN_TRANSPORT_TRANSFER_SENDER_HPP_INCLUDED
#define UAVCAN_TRANSPORT_TRANSFER_SENDER_HPP_INCLUDED
#include <cstdlib>
#include <cassert>
#include <uavcan/build_config.hpp>
#include <uavcan/error.hpp>
#include <uavcan/data_type.hpp>
#include <uavcan/transport/crc.hpp>
#include <uavcan/transport/transfer.hpp>
#include <uavcan/transport/dispatcher.hpp>
namespace uavcan
{
class UAVCAN_EXPORT TransferSender
{
    const MonotonicDuration max_transfer_interval_;
    Dispatcher& dispatcher_;
    TransferPriority priority_;
    CanTxQueue::Qos qos_;
    TransferCRC crc_base_;
    DataTypeID data_type_id_;        // Default-constructed value means "not initialized"
    CanIOFlags flags_;
    uint8_t iface_mask_;             // Bit per CAN interface; all interfaces by default
    bool allow_anonymous_transfers_;

    void registerError() const;

public:
    enum { AllIfacesMask = 0xFF };

    static MonotonicDuration getDefaultMaxTransferInterval()
    {
        return MonotonicDuration::fromMSec(60 * 1000);
    }

    // Fully-initializing constructor. NOTE(review): qos_ is default-initialized
    // here and the qos argument is only passed to init() — confirm that init()
    // stores it (together with crc_base_ and data_type_id_).
    TransferSender(Dispatcher& dispatcher, const DataTypeDescriptor& data_type, CanTxQueue::Qos qos,
                   MonotonicDuration max_transfer_interval = getDefaultMaxTransferInterval())
        : max_transfer_interval_(max_transfer_interval)
        , dispatcher_(dispatcher)
        , priority_(TransferPriority::Default)
        , qos_(CanTxQueue::Qos())
        , flags_(CanIOFlags(0))
        , iface_mask_(AllIfacesMask)
        , allow_anonymous_transfers_(false)
    {
        init(data_type, qos);
    }

    // Deferred-initialization constructor: init() must be called before use
    // (see isInitialized()).
    TransferSender(Dispatcher& dispatcher, MonotonicDuration max_transfer_interval = getDefaultMaxTransferInterval())
        : max_transfer_interval_(max_transfer_interval)
        , dispatcher_(dispatcher)
        , priority_(TransferPriority::Default)
        , qos_(CanTxQueue::Qos())
        , flags_(CanIOFlags(0))
        , iface_mask_(AllIfacesMask)
        , allow_anonymous_transfers_(false)
    { }

    void init(const DataTypeDescriptor& dtid, CanTxQueue::Qos qos);

    // A default-constructed DataTypeID marks the sender as not yet initialized.
    bool isInitialized() const { return data_type_id_ != DataTypeID(); }

    CanIOFlags getCanIOFlags() const { return flags_; }
    void setCanIOFlags(CanIOFlags flags) { flags_ = flags; }

    uint8_t getIfaceMask() const { return iface_mask_; }
    void setIfaceMask(uint8_t iface_mask)
    {
        UAVCAN_ASSERT(iface_mask);   // Zero mask is rejected in debug builds
        iface_mask_ = iface_mask;
    }

    TransferPriority getPriority() const { return priority_; }
    void setPriority(TransferPriority prio) { priority_ = prio; }

    /**
     * Anonymous transfers (i.e. transfers that don't carry a valid Source Node ID) can be sent if
     * the local node is configured in passive mode (i.e. the node doesn't have a valid Node ID).
     * By default, this class will return an error if it is asked to send a transfer while the
     * node is configured in passive mode. However, if this option is enabled, it will be possible
     * to send anonymous transfers from passive mode.
     */
    void allowAnonymousTransfers() { allow_anonymous_transfers_ = true; }

    /**
     * Send with explicit Transfer ID.
     * Should be used only for service responses, where response TID should match request TID.
     */
    int send(const uint8_t* payload, unsigned payload_len, MonotonicTime tx_deadline,
             MonotonicTime blocking_deadline, TransferType transfer_type, NodeID dst_node_id,
             TransferID tid) const;

    /**
     * Send with automatic Transfer ID.
     *
     * Note that as long as the local node operates in passive mode, the
     * flag @ref CanIOFlagAbortOnError will be set implicitly for all outgoing frames.
     *
     * TID is managed by OutgoingTransferRegistry.
     */
    int send(const uint8_t* payload, unsigned payload_len, MonotonicTime tx_deadline,
             MonotonicTime blocking_deadline, TransferType transfer_type, NodeID dst_node_id) const;
};
}
#endif // UAVCAN_TRANSPORT_TRANSFER_SENDER_HPP_INCLUDED
@@ -0,0 +1,27 @@
/*
* Copyright (C) 2014 Pavel Kirienko <pavel.kirienko@gmail.com>
*
* This header should be included by the user application.
*/
#ifndef UAVCAN_UAVCAN_HPP_INCLUDED
#define UAVCAN_UAVCAN_HPP_INCLUDED
#include <uavcan/build_config.hpp>
#include <uavcan/time.hpp>
// High-level node logic
#include <uavcan/node/node.hpp>
#include <uavcan/node/timer.hpp>
#include <uavcan/node/publisher.hpp>
#include <uavcan/node/subscriber.hpp>
#include <uavcan/node/service_server.hpp>
#include <uavcan/node/service_client.hpp>
#include <uavcan/node/global_data_type_registry.hpp>
// Util
#include <uavcan/util/templates.hpp>
#include <uavcan/util/lazy_constructor.hpp>
#include <uavcan/util/method_binder.hpp>
#endif // UAVCAN_UAVCAN_HPP_INCLUDED
@@ -0,0 +1,189 @@
/*
* Copyright (C) 2014 Pavel Kirienko <pavel.kirienko@gmail.com>
*/
#ifndef UAVCAN_UTIL_BITSET_HPP_INCLUDED
#define UAVCAN_UTIL_BITSET_HPP_INCLUDED
#include <cassert>
#include <cstddef>
#include <cstring>
#include <uavcan/build_config.hpp>
namespace uavcan
{
/**
* STL-like bitset
*/
/**
 * STL-like bitset of fixed, compile-time size backed by a plain byte array
 * (no dynamic memory).
 *
 * Out-of-range bit positions trigger UAVCAN_ASSERT in debug builds and are
 * clamped to the last valid bit otherwise. This now applies to reads as well
 * as writes (see test() below).
 */
template <std::size_t NumBits>
class UAVCAN_EXPORT BitSet
{
    enum { NumBytes = (NumBits + 7) / 8 };   // Bit count rounded up to whole bytes

    static std::size_t getByteNum(std::size_t bit_num) { return bit_num / 8; }
    static std::size_t getBitNum(const std::size_t bit_num) { return bit_num % 8; }

    // Clamps an out-of-range position to the last valid bit; asserts in debug builds.
    static void validatePos(std::size_t& inout_pos)
    {
        if (inout_pos >= NumBits)
        {
            UAVCAN_ASSERT(0);
            inout_pos = NumBits - 1;
        }
    }

    char data_[NumBytes];

public:
    /**
     * Proxy returned by the non-const operator[]; enables `bs[i] = true`.
     */
    class Reference
    {
        friend class BitSet;

        BitSet* const parent_;
        const std::size_t bitpos_;

        Reference(BitSet* arg_parent, std::size_t arg_bitpos)
            : parent_(arg_parent)
            , bitpos_(arg_bitpos)
        { }

    public:
        Reference& operator=(bool x)
        {
            parent_->set(bitpos_, x);
            return *this;
        }

        Reference& operator=(const Reference& x)
        {
            parent_->set(bitpos_, x);   // Converts x to bool via operator bool()
            return *this;
        }

        bool operator~() const
        {
            return !parent_->test(bitpos_);
        }

        operator bool() const
        {
            return parent_->test(bitpos_);
        }
    };

    BitSet()
        : data_()
    {
        reset();
    }

    /// Clears every bit; returns *this for chaining.
    BitSet<NumBits>& reset()
    {
        std::memset(data_, 0, NumBytes);
        return *this;
    }

    /// Sets every bit (padding bits in the last byte are also set, but they are
    /// never observed: test(), count() and the comparisons only visit NumBits).
    BitSet<NumBits>& set()
    {
        std::memset(data_, 0xFF, NumBytes);
        return *this;
    }

    /// Sets or clears the bit at pos (clamped if out of range).
    BitSet<NumBits>& set(std::size_t pos, bool val = true)
    {
        validatePos(pos);
        if (val)
        {
            data_[getByteNum(pos)] = char(data_[getByteNum(pos)] | (1 << getBitNum(pos)));
        }
        else
        {
            data_[getByteNum(pos)] = char(data_[getByteNum(pos)] & ~(1 << getBitNum(pos)));
        }
        return *this;
    }

    /// Returns the bit at pos.
    /// Robustness fix: pos is validated (clamped) exactly like in set() — the
    /// previous version read data_ out of bounds for an invalid position.
    bool test(std::size_t pos) const
    {
        validatePos(pos);
        return (data_[getByteNum(pos)] & (1 << getBitNum(pos))) != 0;
    }

    /// True if at least one bit is set. O(NumBits).
    bool any() const
    {
        for (std::size_t i = 0; i < NumBits; ++i)
        {
            if (test(i))
            {
                return true;
            }
        }
        return false;
    }

    /// Number of set bits. O(NumBits).
    std::size_t count() const
    {
        std::size_t retval = 0;
        for (std::size_t i = 0; i < NumBits; ++i)
        {
            retval += test(i) ? 1U : 0U;
        }
        return retval;
    }

    std::size_t size() const { return NumBits; }

    bool operator[](std::size_t pos) const
    {
        return test(pos);   // test() performs the range clamp
    }

    Reference operator[](std::size_t pos)
    {
        validatePos(pos);
        return Reference(this, pos);
    }

    BitSet<NumBits>& operator=(const BitSet<NumBits>& rhs)
    {
        if (&rhs == this)
        {
            return *this;
        }
        for (std::size_t i = 0; i < NumBytes; ++i)
        {
            data_[i] = rhs.data_[i];
        }
        return *this;
    }

    bool operator!=(const BitSet<NumBits>& rhs) const { return !operator==(rhs); }

    /// Bit-by-bit equality; padding bits in the last byte are ignored.
    bool operator==(const BitSet<NumBits>& rhs) const
    {
        for (std::size_t i = 0; i < NumBits; ++i)
        {
            if (test(i) != rhs.test(i))
            {
                return false;
            }
        }
        return true;
    }
};
template <> class BitSet<0>; ///< Invalid instantiation
/**
 * Prints the bitset to any stream-like object as a string of '1'/'0'
 * characters, most significant (highest) bit first.
 */
template <typename Stream, std::size_t NumBits>
Stream& operator<<(Stream& s, const BitSet<NumBits>& x)
{
    std::size_t pos = NumBits;
    while (pos > 0)
    {
        pos--;
        s << (x.test(pos) ? "1" : "0");
    }
    return s;
}
}
#endif // UAVCAN_UTIL_BITSET_HPP_INCLUDED
@@ -0,0 +1,271 @@
/*
* Copyright (C) 2014 Pavel Kirienko <pavel.kirienko@gmail.com>
*/
#ifndef UAVCAN_UTIL_COMPARISON_HPP_INCLUDED
#define UAVCAN_UTIL_COMPARISON_HPP_INCLUDED
#include <uavcan/util/templates.hpp>
#include <uavcan/build_config.hpp>
namespace uavcan
{
/**
* Exact comparison of two floats that suppresses the compiler warnings.
*/
/**
 * Exact floating point equality expressed as (a <= b) && (a >= b), which
 * avoids the compiler's direct-comparison warning while keeping operator==
 * semantics (NaN operands compare unequal).
 */
template <typename T>
UAVCAN_EXPORT
inline bool areFloatsExactlyEqual(const T& left, const T& right)
{
    const bool not_greater = (left <= right);
    const bool not_less = (left >= right);
    return not_greater && not_less;
}
/**
* This function performs fuzzy comparison of two floating point numbers.
* Type of T can be either float, double or long double.
* For details refer to http://randomascii.wordpress.com/2012/02/25/comparing-floating-point-numbers-2012-edition/
* See also: @ref UAVCAN_FLOAT_COMPARISON_EPSILON_MULT.
*/
/**
 * Fuzzy comparison of two floating point numbers (float, double, long double).
 * For details refer to http://randomascii.wordpress.com/2012/02/25/comparing-floating-point-numbers-2012-edition/
 * See also: @ref UAVCAN_FLOAT_COMPARISON_EPSILON_MULT.
 */
template <typename T>
UAVCAN_EXPORT
inline bool areFloatsClose(T a, T b, const T& absolute_epsilon, const T& relative_epsilon)
{
    if (isNaN(a) || isNaN(b))
    {
        return false;                        // NaN is never close to anything, including itself
    }
    if (isInfinity(a) || isInfinity(b))
    {
        return areFloatsExactlyEqual(a, b);  // Infinities must match exactly (including sign)
    }

    const T difference = std::fabs(a - b);
    if (difference <= absolute_epsilon)
    {
        return true;                         // Handles numbers near zero
    }

    // General case: relative epsilon scaled by the larger magnitude
    const T magnitude_a = std::fabs(a);
    const T magnitude_b = std::fabs(b);
    const T largest = (magnitude_b > magnitude_a) ? magnitude_b : magnitude_a;
    return difference <= (largest * relative_epsilon);
}
/**
* This namespace contains implementation details for areClose().
* Don't try this at home.
*/
namespace are_close_impl_
{
// Tag types distinguished by sizeof() for SFINAE overload dispatch.
struct Applicable { char foo[1]; };
struct NotApplicable { long long foo[16]; };

// Detects whether `bool This::isClose(const Rhs&) const` or
// `bool This::isClose(Rhs) const` exists; Result equals sizeof(Applicable)
// if so, sizeof(NotApplicable) otherwise.
template <typename This, typename Rhs>
struct HasIsCloseMethod
{
    template <typename U, typename R, bool (U::*)(const R&) const> struct ConstRef { };
    template <typename U, typename R, bool (U::*)(R) const> struct ByValue { };

    template <typename U, typename R> static Applicable test(ConstRef<U, R, &U::isClose>*);
    template <typename U, typename R> static Applicable test(ByValue<U, R, &U::isClose>*);
    template <typename U, typename R> static NotApplicable test(...);

    enum { Result = sizeof(test<This, Rhs>(UAVCAN_NULLPTR)) };
};

/// First stage: bool L::isClose(R)
template <typename L, typename R>
UAVCAN_EXPORT
inline bool areCloseImplFirst(const L& left, const R& right, IntToType<sizeof(Applicable)>)
{
    return left.isClose(right);
}

/// Second stage: bool R::isClose(L)
template <typename L, typename R>
UAVCAN_EXPORT
inline bool areCloseImplSecond(const L& left, const R& right, IntToType<sizeof(Applicable)>)
{
    return right.isClose(left);
}

/// Second stage fallback: L == R
template <typename L, typename R>
UAVCAN_EXPORT
inline bool areCloseImplSecond(const L& left, const R& right, IntToType<sizeof(NotApplicable)>)
{
    return left == right;
}

/// First stage fallback: select either L == R or bool R::isClose(L)
template <typename L, typename R>
UAVCAN_EXPORT
inline bool areCloseImplFirst(const L& left, const R& right, IntToType<sizeof(NotApplicable)>)
{
    return are_close_impl_::areCloseImplSecond(left, right,
                                               IntToType<are_close_impl_::HasIsCloseMethod<R, L>::Result>());
}
} // namespace are_close_impl_
/**
* Generic fuzzy comparison function.
*
* This function properly handles floating point comparison, including mixed floating point type comparison,
* e.g. float with long double.
*
* Two objects of types A and B will be fuzzy comparable if either method is defined:
* - bool A::isClose(const B&) const
* - bool A::isClose(B) const
* - bool B::isClose(const A&) const
* - bool B::isClose(A) const
*
* Call areClose(A, B) will be dispatched as follows:
*
* - If A and B are both floating point types (float, double, long double) - possibly different - the call will be
* dispatched to @ref areFloatsClose(). If A and B are different types, value of the larger type will be coerced
* to the smaller type, e.g. areClose(long double, float) --> areClose(float, float).
*
* - If A defines isClose() that accepts B, the call will be dispatched there.
*
* - If B defines isClose() that accepts A, the call will be dispatched there (A/B swapped).
*
* - Last resort is A == B.
*
* Alternatively, a custom behavior can be implemented via template specialization.
*
* See also: @ref UAVCAN_FLOAT_COMPARISON_EPSILON_MULT.
*
* Examples:
* areClose(1.0, 1.0F) --> true
* areClose(1.0, 1.0F + std::numeric_limits<float>::epsilon()) --> true
* areClose(1.0, 1.1) --> false
* areClose("123", std::string("123")) --> true (using std::string's operator ==)
* areClose(inf, inf) --> true
* areClose(inf, -inf) --> false
* areClose(nan, nan) --> false (any comparison with nan returns false)
* areClose(123, "123") --> compilation error: operator == is not defined
*/
template <typename L, typename R>
UAVCAN_EXPORT
inline bool areClose(const L& left, const R& right)
{
    // Dispatches on whether L provides isClose(R); the remaining fallbacks
    // (R::isClose(L), then operator==) are selected inside are_close_impl_.
    return are_close_impl_::areCloseImplFirst(left, right,
                                              IntToType<are_close_impl_::HasIsCloseMethod<L, R>::Result>());
}
/*
 * Float comparison specializations: forward to areFloatsClose() using the
 * type's machine epsilon as the absolute epsilon and the same epsilon scaled
 * by FloatComparisonEpsilonMult as the relative epsilon.
 */
template <>
UAVCAN_EXPORT
inline bool areClose<float, float>(const float& left, const float& right)
{
    return areFloatsClose(left, right, NumericTraits<float>::epsilon(),
                          NumericTraits<float>::epsilon() * FloatComparisonEpsilonMult);
}

template <>
UAVCAN_EXPORT
inline bool areClose<double, double>(const double& left, const double& right)
{
    return areFloatsClose(left, right, NumericTraits<double>::epsilon(),
                          NumericTraits<double>::epsilon() * FloatComparisonEpsilonMult);
}

template <>
UAVCAN_EXPORT
inline bool areClose<long double, long double>(const long double& left, const long double& right)
{
    return areFloatsClose(left, right, NumericTraits<long double>::epsilon(),
                          NumericTraits<long double>::epsilon() * FloatComparisonEpsilonMult);
}

/*
 * Mixed floating type comparison: the value of the larger type is coerced to
 * the smaller type, then the same-type specialization above is used.
 */
template <>
UAVCAN_EXPORT
inline bool areClose<float, double>(const float& left, const double& right)
{
    return areClose(left, static_cast<float>(right));
}

template <>
UAVCAN_EXPORT
inline bool areClose<double, float>(const double& left, const float& right)
{
    return areClose(static_cast<float>(left), right);
}

template <>
UAVCAN_EXPORT
inline bool areClose<float, long double>(const float& left, const long double& right)
{
    return areClose(left, static_cast<float>(right));
}

template <>
UAVCAN_EXPORT
inline bool areClose<long double, float>(const long double& left, const float& right)
{
    return areClose(static_cast<float>(left), right);
}

template <>
UAVCAN_EXPORT
inline bool areClose<double, long double>(const double& left, const long double& right)
{
    return areClose(left, static_cast<double>(right));
}

template <>
UAVCAN_EXPORT
inline bool areClose<long double, double>(const long double& left, const double& right)
{
    return areClose(static_cast<double>(left), right);
}
/**
 * Comparison against zero.
 * Helps to compare a floating point number against zero if the exact type is unknown.
 * For non-floating point types performs exact comparison against integer zero.
 */
template <typename T>
UAVCAN_EXPORT
inline bool isCloseToZero(const T& x)
{
    return x == 0;
}

// Floating point overloads route through the fuzzy areClose() comparison above.
template <>
UAVCAN_EXPORT
inline bool isCloseToZero<float>(const float& x)
{
    return areClose(x, static_cast<float>(0.0F));
}

template <>
UAVCAN_EXPORT
inline bool isCloseToZero<double>(const double& x)
{
    return areClose(x, static_cast<double>(0.0));
}

template <>
UAVCAN_EXPORT
inline bool isCloseToZero<long double>(const long double& x)
{
    return areClose(x, static_cast<long double>(0.0L));
}
}
#endif // UAVCAN_UTIL_COMPARISON_HPP_INCLUDED
@@ -0,0 +1,182 @@
/*
* Copyright (C) 2014 Pavel Kirienko <pavel.kirienko@gmail.com>
*/
#ifndef UAVCAN_UTIL_LAZY_CONSTRUCTOR_HPP_INCLUDED
#define UAVCAN_UTIL_LAZY_CONSTRUCTOR_HPP_INCLUDED
#include <cstdlib>
#include <uavcan/error.hpp>
#include <uavcan/build_config.hpp>
#include <uavcan/util/templates.hpp>
#ifndef UAVCAN_CPP_VERSION
# error UAVCAN_CPP_VERSION
#endif
namespace uavcan
{
/**
* This class allows postponing the object construction.
* It statically allocates a pool of memory of just enough size to fit the object being constructed.
* Later call to construct<>() calls the constructor of the object.
* The object will be destroyed automatically when the container class destructor is called.
* The memory pool is aligned at the size of the largest primitive type (long double or long long int).
*/
template <typename T>
class UAVCAN_EXPORT LazyConstructor
{
#if UAVCAN_CPP_VERSION >= UAVCAN_CPP11
    // C++11: alignas(T) guarantees correct alignment for the stored object.
    struct
    {
        alignas(T) unsigned char pool[sizeof(T)];
    } data_;
#else
    // C++03: union with the largest primitive types forces worst-case alignment.
    union
    {
        unsigned char pool[sizeof(T)];
        long double _aligner1;
        long long _aligner2;
    } data_;
#endif
    T* ptr_;   // Points into data_.pool once constructed; UAVCAN_NULLPTR otherwise

    // Dereferencing before construction is a fatal error, not undefined behavior.
    void ensureConstructed() const
    {
        if (!ptr_)
        {
            handleFatalError("LazyConstructor<T>");
        }
    }

    // Constructing twice without destroy() in between is likewise fatal.
    void ensureNotConstructed() const
    {
        if (ptr_)
        {
            handleFatalError("LazyConstructor<T>");
        }
    }

public:
    LazyConstructor()
        : ptr_(UAVCAN_NULLPTR)
    {
        fill(data_.pool, data_.pool + sizeof(T), uint8_t(0));   // Scrub the storage
    }

    LazyConstructor(const LazyConstructor<T>& rhs) // Implicit
        : ptr_(UAVCAN_NULLPTR)
    {
        fill(data_.pool, data_.pool + sizeof(T), uint8_t(0));
        if (rhs)
        {
            construct<const T&>(*rhs); // Invoke copy constructor
        }
    }

    ~LazyConstructor() { destroy(); }

    LazyConstructor<T>& operator=(const LazyConstructor<T>& rhs)
    {
        destroy();
        if (rhs)
        {
            construct<const T&>(*rhs); // Invoke copy constructor
        }
        return *this;
    }

    bool isConstructed() const { return ptr_ != UAVCAN_NULLPTR; }

    // Implicit pointer conversion; doubles as a null check (used by the copy
    // constructor and assignment above).
    operator T*() const { return ptr_; }

    const T* operator->() const { ensureConstructed(); return ptr_; }
    T* operator->() { ensureConstructed(); return ptr_; }

    const T& operator*() const { ensureConstructed(); return *ptr_; }
    T& operator*() { ensureConstructed(); return *ptr_; }

    // Destroys the contained object (if any) and scrubs the storage.
    // Safe to call repeatedly.
    void destroy()
    {
        if (ptr_)
        {
            ptr_->~T();
        }
        ptr_ = UAVCAN_NULLPTR;
        fill(data_.pool, data_.pool + sizeof(T), uint8_t(0));
    }

    // Placement-constructs the object in the internal pool.
    void construct()
    {
        ensureNotConstructed();
        ptr_ = new (static_cast<void*>(data_.pool)) T();
    }

// Generator script (Python) for the N-ary construct() overloads below:
// MAX_ARGS = 6
// TEMPLATE = '''
// template <%s>
// void construct(%s)
// {
//     ensureNotConstructed();
//     ptr_ = new (static_cast<void*>(data_.pool)) T(%s);
// }'''
// for nargs in range(1, MAX_ARGS + 1):
//     nums = [(x + 1) for x in range(nargs)]
//     l1 = ['typename P%d' % x for x in nums]
//     l2 = ['typename ParameterType<P%d>::Type p%d' % (x, x) for x in nums]
//     l3 = ['p%d' % x for x in nums]
//     print(TEMPLATE % (', '.join(l1), ', '.join(l2), ', '.join(l3)))

    template <typename P1>
    void construct(typename ParameterType<P1>::Type p1)
    {
        ensureNotConstructed();
        ptr_ = new (static_cast<void*>(data_.pool)) T(p1);
    }

    template <typename P1, typename P2>
    void construct(typename ParameterType<P1>::Type p1, typename ParameterType<P2>::Type p2)
    {
        ensureNotConstructed();
        ptr_ = new (static_cast<void*>(data_.pool)) T(p1, p2);
    }

    template <typename P1, typename P2, typename P3>
    void construct(typename ParameterType<P1>::Type p1, typename ParameterType<P2>::Type p2,
                   typename ParameterType<P3>::Type p3)
    {
        ensureNotConstructed();
        ptr_ = new (static_cast<void*>(data_.pool)) T(p1, p2, p3);
    }

    template <typename P1, typename P2, typename P3, typename P4>
    void construct(typename ParameterType<P1>::Type p1, typename ParameterType<P2>::Type p2,
                   typename ParameterType<P3>::Type p3, typename ParameterType<P4>::Type p4)
    {
        ensureNotConstructed();
        ptr_ = new (static_cast<void*>(data_.pool)) T(p1, p2, p3, p4);
    }

    template <typename P1, typename P2, typename P3, typename P4, typename P5>
    void construct(typename ParameterType<P1>::Type p1, typename ParameterType<P2>::Type p2,
                   typename ParameterType<P3>::Type p3, typename ParameterType<P4>::Type p4,
                   typename ParameterType<P5>::Type p5)
    {
        ensureNotConstructed();
        ptr_ = new (static_cast<void*>(data_.pool)) T(p1, p2, p3, p4, p5);
    }

    template <typename P1, typename P2, typename P3, typename P4, typename P5, typename P6>
    void construct(typename ParameterType<P1>::Type p1, typename ParameterType<P2>::Type p2,
                   typename ParameterType<P3>::Type p3, typename ParameterType<P4>::Type p4,
                   typename ParameterType<P5>::Type p5, typename ParameterType<P6>::Type p6)
    {
        ensureNotConstructed();
        ptr_ = new (static_cast<void*>(data_.pool)) T(p1, p2, p3, p4, p5, p6);
    }
};
}
#endif // UAVCAN_UTIL_LAZY_CONSTRUCTOR_HPP_INCLUDED
@@ -0,0 +1,176 @@
/*
* Singly-linked list.
* Copyright (C) 2014 Pavel Kirienko <pavel.kirienko@gmail.com>
*/
#ifndef UAVCAN_UTIL_LINKED_LIST_HPP_INCLUDED
#define UAVCAN_UTIL_LINKED_LIST_HPP_INCLUDED
#include <cstdlib>
#include <cassert>
#include <uavcan/build_config.hpp>
#include <uavcan/util/templates.hpp>
namespace uavcan
{
/**
* Classes that are supposed to be linked-listed should derive this.
*/
template <typename T>
class UAVCAN_EXPORT LinkedListNode : Noncopyable
{
    T* next_;   // Next node in the chain; UAVCAN_NULLPTR terminates the list

protected:
    LinkedListNode()
        : next_(UAVCAN_NULLPTR)
    { }

    // Protected, non-virtual: this class is only usable as a base (CRTP-style).
    ~LinkedListNode() { }

public:
    T* getNextListNode() const { return next_; }

    void setNextListNode(T* node)
    {
        next_ = node;
    }
};
/**
* Linked list root.
*/
template <typename T>
class UAVCAN_EXPORT LinkedListRoot : Noncopyable
{
    T* root_;   // Head of the chain; UAVCAN_NULLPTR when the list is empty

public:
    LinkedListRoot()
        : root_(UAVCAN_NULLPTR)
    { }

    T* get() const { return root_; }
    bool isEmpty() const { return get() == UAVCAN_NULLPTR; }

    /**
     * Complexity: O(N)
     */
    unsigned getLength() const;

    /**
     * Inserts the node to the beginning of the list.
     * If the node is already present in the list, it will be relocated to the beginning.
     * Complexity: O(N)
     */
    void insert(T* node);

    /**
     * Inserts the node immediately before the node X where predicate(X) returns true.
     * If the node is already present in the list, it can be relocated to a new position.
     * Complexity: O(2N) (calls remove())
     */
    template <typename Predicate>
    void insertBefore(T* node, Predicate predicate);

    /**
     * Removes only the first occurrence of the node.
     * Complexity: O(N)
     */
    void remove(const T* node);
};
// ----------------------------------------------------------------------------
/*
* LinkedListRoot<>
*/
/**
 * Counts the nodes by walking the chain from the root. Complexity: O(N).
 */
template <typename T>
unsigned LinkedListRoot<T>::getLength() const
{
    unsigned count = 0;
    for (const T* p = root_; p; p = p->getNextListNode())
    {
        ++count;
    }
    return count;
}
/**
 * Prepends the node to the list, relocating it to the front if it was already
 * linked. A null node only asserts (debug builds) and is otherwise ignored.
 */
template <typename T>
void LinkedListRoot<T>::insert(T* node)
{
    if (node != UAVCAN_NULLPTR)
    {
        remove(node);                 // Detach first so the node can never be linked twice (no loops)
        node->setNextListNode(root_);
        root_ = node;
    }
    else
    {
        UAVCAN_ASSERT(0);
    }
}
/**
 * Inserts `node` immediately before the first node X for which predicate(X)
 * is true; if the predicate never fires the node is appended at the tail.
 * The node is removed first, so an already-linked node is relocated.
 */
template <typename T>
template <typename Predicate>
void LinkedListRoot<T>::insertBefore(T* node, Predicate predicate)
{
    if (node == UAVCAN_NULLPTR)
    {
        UAVCAN_ASSERT(0);
        return;
    }
    remove(node);   // Guarantees the node cannot end up linked twice

    if (root_ == UAVCAN_NULLPTR || predicate(root_))
    {
        // New head: the list is empty or the current head matches the predicate
        node->setNextListNode(root_);
        root_ = node;
    }
    else
    {
        // Walk until the successor matches (or the tail is reached), then splice in
        T* p = root_;
        while (p->getNextListNode())
        {
            if (predicate(p->getNextListNode()))
            {
                break;
            }
            p = p->getNextListNode();
        }
        node->setNextListNode(p->getNextListNode());
        p->setNextListNode(node);
    }
}
/**
 * Unlinks the first occurrence of `node`; a null node or an empty list is a
 * no-op, and so is a node that is not in the list.
 */
template <typename T>
void LinkedListRoot<T>::remove(const T* node)
{
    if (root_ == UAVCAN_NULLPTR || node == UAVCAN_NULLPTR)
    {
        return;
    }
    if (root_ == node)
    {
        root_ = root_->getNextListNode();   // Unlink the head
    }
    else
    {
        // Find the predecessor of `node` and splice the node out
        T* p = root_;
        while (p->getNextListNode())
        {
            if (p->getNextListNode() == node)
            {
                p->setNextListNode(p->getNextListNode()->getNextListNode());
                break;
            }
            p = p->getNextListNode();
        }
    }
}
}
#endif // UAVCAN_UTIL_LINKED_LIST_HPP_INCLUDED
@@ -0,0 +1,388 @@
/*
* Copyright (C) 2014 Pavel Kirienko <pavel.kirienko@gmail.com>
*/
#ifndef UAVCAN_UTIL_MAP_HPP_INCLUDED
#define UAVCAN_UTIL_MAP_HPP_INCLUDED
#include <cassert>
#include <cstdlib>
#include <uavcan/util/linked_list.hpp>
#include <uavcan/build_config.hpp>
#include <uavcan/dynamic_memory.hpp>
#include <uavcan/util/templates.hpp>
#include <uavcan/util/placement_new.hpp>
namespace uavcan
{
/**
* Slow but memory efficient KV container.
*
* KV pairs will be allocated in the node's memory pool.
*
* Please be aware that this container does not perform any speed optimizations to minimize memory footprint,
* so the complexity of most operations is O(N).
*
* Type requirements:
* Both key and value must be copyable, assignable and default constructible.
* Key must implement a comparison operator.
* Key's default constructor must initialize the object into invalid state.
* Size of Key + Value + padding must not exceed MemPoolBlockSize.
*/
template <typename Key, typename Value>
class UAVCAN_EXPORT Map : Noncopyable
{
public:
    /**
     * One key/value slot. A slot holding a default-constructed Key is considered vacant
     * (see the class-level requirement that the default Key is an invalid state).
     */
    struct KVPair
    {
        Value value; // Key and value are swapped because this may allow to reduce padding (depending on types)
        Key key;
        KVPair() :
            value(),
            key()
        { }
        KVPair(const Key& arg_key, const Value& arg_value) :
            value(arg_value),
            key(arg_key)
        { }
        // True if this slot's key equals rhs; match(Key()) tests for vacancy
        bool match(const Key& rhs) const { return rhs == key; }
    };
private:
    /**
     * One pool-allocated storage block: the list linkage plus as many KVPairs as fit
     * into the remainder of a memory pool block.
     */
    struct KVGroup : LinkedListNode<KVGroup>
    {
        // Number of pairs that fit into a single pool block alongside the linkage
        enum { NumKV = (MemPoolBlockSize - sizeof(LinkedListNode<KVGroup>)) / sizeof(KVPair) };
        KVPair kvs[NumKV];
        KVGroup()
        {
            // At least one pair must fit, and the whole group must fit into a pool block
            StaticAssert<(static_cast<unsigned>(NumKV) > 0)>::check();
            IsDynamicallyAllocatable<KVGroup>::check();
        }
        // Allocates and constructs a group from the pool; returns null on out-of-memory
        static KVGroup* instantiate(IPoolAllocator& allocator)
        {
            void* const praw = allocator.allocate(sizeof(KVGroup));
            if (praw == UAVCAN_NULLPTR)
            {
                return UAVCAN_NULLPTR;
            }
            return new (praw) KVGroup();
        }
        // Destructs the group, returns its block to the pool and nulls the caller's pointer
        static void destroy(KVGroup*& obj, IPoolAllocator& allocator)
        {
            if (obj != UAVCAN_NULLPTR)
            {
                obj->~KVGroup();
                allocator.deallocate(obj);
                obj = UAVCAN_NULLPTR;
            }
        }
        // Returns the first pair in this group whose key equals 'key', or null if absent
        KVPair* find(const Key& key)
        {
            for (unsigned i = 0; i < static_cast<unsigned>(NumKV); i++)
            {
                if (kvs[i].match(key))
                {
                    return kvs + i;
                }
            }
            return UAVCAN_NULLPTR;
        }
    };
    LinkedListRoot<KVGroup> list_;  // Singly-linked list of storage groups
    IPoolAllocator& allocator_;     // Pool from which groups are allocated
    // Returns the pair holding 'key' (or null); O(N) scan over all groups
    KVPair* findKey(const Key& key);
    // Releases groups that contain only vacant slots
    void compact();
    // Predicate matching every entry; used by isEmpty() and clear()
    struct YesPredicate
    {
        bool operator()(const Key&, const Value&) const { return true; }
    };
public:
    Map(IPoolAllocator& allocator) :
        allocator_(allocator)
    {
        // The default Key is used as the vacant-slot sentinel, so it must compare equal to itself
        UAVCAN_ASSERT(Key() == Key());
    }
    ~Map()
    {
        clear();
    }
    /**
     * Returns null pointer if there's no such entry.
     */
    Value* access(const Key& key);
    /**
     * If entry with the same key already exists, it will be replaced
     */
    Value* insert(const Key& key, const Value& value);
    /**
     * Does nothing if there's no such entry.
     */
    void remove(const Key& key);
    /**
     * Removes entries where the predicate returns true.
     * Predicate prototype:
     *  bool (Key& key, Value& value)
     */
    template <typename Predicate>
    void removeAllWhere(Predicate predicate);
    /**
     * Returns first entry where the predicate returns true.
     * Predicate prototype:
     *  bool (const Key& key, const Value& value)
     */
    template <typename Predicate>
    const Key* find(Predicate predicate) const;
    /**
     * Removes all items.
     */
    void clear();
    /**
     * Returns a key-value pair located at the specified position from the beginning.
     * Note that any insertion or deletion may greatly disturb internal ordering, so use with care.
     * If index is greater than or equal the number of pairs, null pointer will be returned.
     */
    KVPair* getByIndex(unsigned index);
    const KVPair* getByIndex(unsigned index) const;
    /**
     * Complexity is O(1).
     */
    bool isEmpty() const { return find(YesPredicate()) == UAVCAN_NULLPTR; }
    /**
     * Complexity is O(N).
     */
    unsigned getSize() const;
};
// ----------------------------------------------------------------------------
/*
* Map<>
*/
template <typename Key, typename Value>
typename Map<Key, Value>::KVPair* Map<Key, Value>::findKey(const Key& key)
{
    // Linear scan over every storage group until the key is located
    for (KVGroup* grp = list_.get(); grp != UAVCAN_NULLPTR; grp = grp->getNextListNode())
    {
        KVPair* const match = grp->find(key);
        if (match != UAVCAN_NULLPTR)
        {
            return match;
        }
    }
    return UAVCAN_NULLPTR;      // Key is not stored
}
template <typename Key, typename Value>
void Map<Key, Value>::compact()
{
    // Return every group that holds only vacant (default-keyed) slots back to the pool
    KVGroup* grp = list_.get();
    while (grp != UAVCAN_NULLPTR)
    {
        KVGroup* const next = grp->getNextListNode();   // Saved before possible destruction
        bool all_vacant = true;
        for (int i = 0; i < KVGroup::NumKV; i++)
        {
            if (!grp->kvs[i].match(Key()))
            {
                all_vacant = false;     // At least one occupied slot - keep this group
                break;
            }
        }
        if (all_vacant)
        {
            list_.remove(grp);
            KVGroup::destroy(grp, allocator_);
        }
        grp = next;
    }
}
template <typename Key, typename Value>
Value* Map<Key, Value>::access(const Key& key)
{
    UAVCAN_ASSERT(!(key == Key()));     // The default key marks vacant slots and cannot be looked up
    KVPair* const pair = findKey(key);
    if (pair == UAVCAN_NULLPTR)
    {
        return UAVCAN_NULLPTR;          // No such entry
    }
    return &pair->value;
}
template <typename Key, typename Value>
Value* Map<Key, Value>::insert(const Key& key, const Value& value)
{
    UAVCAN_ASSERT(!(key == Key()));     // The default key is reserved as the vacant-slot sentinel
    remove(key);                        // Enforce key uniqueness by dropping any previous entry

    // Prefer reusing a vacant slot (one holding the default key) in the existing groups
    KVPair* const vacant = findKey(Key());
    if (vacant != UAVCAN_NULLPTR)
    {
        *vacant = KVPair(key, value);
        return &vacant->value;
    }

    // No free slot anywhere - grow the storage by one pool block
    KVGroup* const fresh = KVGroup::instantiate(allocator_);
    if (fresh == UAVCAN_NULLPTR)
    {
        return UAVCAN_NULLPTR;          // Pool exhausted
    }
    list_.insert(fresh);
    fresh->kvs[0] = KVPair(key, value);
    return &fresh->kvs[0].value;
}
template <typename Key, typename Value>
void Map<Key, Value>::remove(const Key& key)
{
    UAVCAN_ASSERT(!(key == Key()));     // Removing the sentinel key is meaningless
    KVPair* const pair = findKey(key);
    if (pair != UAVCAN_NULLPTR)
    {
        *pair = KVPair();   // Reset to the default key, marking the slot vacant
        compact();          // Free the group if it became entirely vacant
    }
}
template <typename Key, typename Value>
template <typename Predicate>
void Map<Key, Value>::removeAllWhere(Predicate predicate)
{
    unsigned num_removed = 0;
    KVGroup* grp = list_.get();
    while (grp != UAVCAN_NULLPTR)
    {
        KVGroup* const next = grp->getNextListNode();   // Saved in case the current group is disturbed
        for (int i = 0; i < KVGroup::NumKV; i++)
        {
            const KVPair* const pair = grp->kvs + i;
            // Offer only occupied slots to the predicate
            if (!pair->match(Key()) && predicate(pair->key, pair->value))
            {
                num_removed++;
                grp->kvs[i] = KVPair();     // Vacate the slot
            }
        }
        grp = next;
    }
    if (num_removed > 0)
    {
        compact();      // Release any groups emptied by the removals
    }
}
template <typename Key, typename Value>
template <typename Predicate>
const Key* Map<Key, Value>::find(Predicate predicate) const
{
    KVGroup* grp = list_.get();
    while (grp != UAVCAN_NULLPTR)
    {
        KVGroup* const next = grp->getNextListNode();   // Saved in case the predicate disturbs the storage
        for (int i = 0; i < KVGroup::NumKV; i++)
        {
            const KVPair* const pair = grp->kvs + i;
            // Only occupied slots are offered to the predicate
            if (!pair->match(Key()) && predicate(pair->key, pair->value))
            {
                return &grp->kvs[i].key;
            }
        }
        grp = next;
    }
    return UAVCAN_NULLPTR;      // No entry matched
}
template <typename Key, typename Value>
void Map<Key, Value>::clear()
{
    // Drop every entry: the predicate accepts all pairs
    removeAllWhere(YesPredicate());
}
template <typename Key, typename Value>
typename Map<Key, Value>::KVPair* Map<Key, Value>::getByIndex(unsigned index)
{
    // Crawl the occupied slots in storage order, counting down to the requested position
    KVGroup* grp = list_.get();
    while (grp != UAVCAN_NULLPTR)
    {
        KVGroup* const next = grp->getNextListNode();
        for (int i = 0; i < KVGroup::NumKV; i++)
        {
            KVPair* const pair = grp->kvs + i;
            if (pair->match(Key()))
            {
                continue;       // Vacant slot does not count towards the index
            }
            if (index == 0)
            {
                return pair;
            }
            index--;
        }
        grp = next;
    }
    return UAVCAN_NULLPTR;      // Fewer than (index + 1) entries stored
}
template <typename Key, typename Value>
const typename Map<Key, Value>::KVPair* Map<Key, Value>::getByIndex(unsigned index) const
{
    // Delegate to the non-const overload; the lookup itself does not mutate the container
    Map<Key, Value>* const mutable_self = const_cast<Map<Key, Value>*>(this);
    return mutable_self->getByIndex(index);
}
template <typename Key, typename Value>
unsigned Map<Key, Value>::getSize() const
{
    // Count occupied slots across every storage group
    unsigned count = 0;
    for (KVGroup* grp = list_.get(); grp != UAVCAN_NULLPTR; grp = grp->getNextListNode())
    {
        for (int i = 0; i < KVGroup::NumKV; i++)
        {
            if (!grp->kvs[i].match(Key()))
            {
                count++;
            }
        }
    }
    return count;
}
}
#endif // UAVCAN_UTIL_MAP_HPP_INCLUDED
@@ -0,0 +1,84 @@
/*
* Copyright (C) 2014 Pavel Kirienko <pavel.kirienko@gmail.com>
*/
#ifndef UAVCAN_UTIL_METHOD_BINDER_HPP_INCLUDED
#define UAVCAN_UTIL_METHOD_BINDER_HPP_INCLUDED
#include <uavcan/error.hpp>
#include <uavcan/build_config.hpp>
#include <uavcan/util/templates.hpp>
namespace uavcan
{
/**
* Use this to call member functions as callbacks in C++03 mode.
*
* In C++11 or newer you don't need it because you can use std::function<>/std::bind<> instead.
*/
template <typename ObjectPtr, typename MemFunPtr>
class UAVCAN_EXPORT MethodBinder
{
ObjectPtr obj_;
MemFunPtr fun_;
void validateBeforeCall() const
{
if (!operator bool())
{
handleFatalError("Null binder");
}
}
public:
MethodBinder()
: obj_()
, fun_()
{ }
MethodBinder(ObjectPtr o, MemFunPtr f)
: obj_(o)
, fun_(f)
{ }
/**
* Returns true if the binder is initialized (doesn't contain null pointers).
*/
operator bool() const
{
return coerceOrFallback<bool>(obj_, true) && coerceOrFallback<bool>(fun_, true);
}
/**
* Will raise a fatal error if either method pointer or object pointer are null.
*/
void operator()()
{
validateBeforeCall();
(obj_->*fun_)();
}
/**
* Will raise a fatal error if either method pointer or object pointer are null.
*/
template <typename Par1>
void operator()(Par1& p1)
{
validateBeforeCall();
(obj_->*fun_)(p1);
}
/**
* Will raise a fatal error if either method pointer or object pointer are null.
*/
template <typename Par1, typename Par2>
void operator()(Par1& p1, Par2& p2)
{
validateBeforeCall();
(obj_->*fun_)(p1, p2);
}
};
}
#endif // UAVCAN_UTIL_METHOD_BINDER_HPP_INCLUDED
@@ -0,0 +1,478 @@
/*
* Copyright (C) 2014 Pavel Kirienko <pavel.kirienko@gmail.com>
*/
#ifndef UAVCAN_UTIL_MULTISET_HPP_INCLUDED
#define UAVCAN_UTIL_MULTISET_HPP_INCLUDED
#include <cassert>
#include <cstdlib>
#include <uavcan/util/linked_list.hpp>
#include <uavcan/build_config.hpp>
#include <uavcan/dynamic_memory.hpp>
#include <uavcan/util/templates.hpp>
#include <uavcan/util/placement_new.hpp>
#if !defined(UAVCAN_CPP_VERSION) || !defined(UAVCAN_CPP11)
# error UAVCAN_CPP_VERSION
#endif
namespace uavcan
{
/**
* Slow but memory efficient unordered multiset. Unlike Map<>, this container does not move objects, so
* they don't have to be copyable.
*
* Items will be allocated in the node's memory pool.
*/
template <typename T>
class UAVCAN_EXPORT Multiset : Noncopyable
{
    /**
     * One storage slot. Objects are constructed in-place inside 'pool' with placement new;
     * 'ptr' doubles as the occupancy flag (null means the slot is vacant).
     */
    struct Item : ::uavcan::Noncopyable
    {
        T* ptr;     // Points into 'pool' while an object is alive there; null when vacant
#if UAVCAN_CPP_VERSION >= UAVCAN_CPP11
        alignas(T) unsigned char pool[sizeof(T)]; ///< Memory efficient version
#else
        union
        {
            unsigned char pool[sizeof(T)];
            /*
             * Such alignment does not guarantee safety for all types (only for libuavcan internal ones);
             * however, increasing it is too memory inefficient. So it is recommended to use C++11, where
             * this issue is resolved with alignas() (see above).
             */
            void* _aligner1_;
            long long _aligner2_;
        };
#endif
        Item()
            : ptr(UAVCAN_NULLPTR)
        {
            // Zero the raw storage so a vacant slot has a deterministic state
            fill_n(pool, sizeof(pool), static_cast<unsigned char>(0));
        }
        ~Item() { destroy(); }
        bool isConstructed() const { return ptr != UAVCAN_NULLPTR; }
        // Destroys the contained object (if any) and zeroes the storage, marking the slot vacant
        void destroy()
        {
            if (ptr != UAVCAN_NULLPTR)
            {
                ptr->~T();
                ptr = UAVCAN_NULLPTR;
                fill_n(pool, sizeof(pool), static_cast<unsigned char>(0));
            }
        }
    };
private:
    /**
     * One pool-allocated storage block: the list linkage plus as many Items as fit
     * into the remainder of a memory pool block.
     */
    struct Chunk : LinkedListNode<Chunk>
    {
        // Number of items that fit into a single pool block alongside the linkage
        enum { NumItems = (MemPoolBlockSize - sizeof(LinkedListNode<Chunk>)) / sizeof(Item) };
        Item items[NumItems];
        Chunk()
        {
            // At least one item must fit, and the whole chunk must fit into a pool block
            StaticAssert<(static_cast<unsigned>(NumItems) > 0)>::check();
            IsDynamicallyAllocatable<Chunk>::check();
            UAVCAN_ASSERT(!items[0].isConstructed());
        }
        // Allocates and constructs a chunk from the pool; returns null on out-of-memory
        static Chunk* instantiate(IPoolAllocator& allocator)
        {
            void* const praw = allocator.allocate(sizeof(Chunk));
            if (praw == UAVCAN_NULLPTR)
            {
                return UAVCAN_NULLPTR;
            }
            return new (praw) Chunk();
        }
        // Destructs the chunk, returns its block to the pool and nulls the caller's pointer
        static void destroy(Chunk*& obj, IPoolAllocator& allocator)
        {
            if (obj != UAVCAN_NULLPTR)
            {
                obj->~Chunk();
                allocator.deallocate(obj);
                obj = UAVCAN_NULLPTR;
            }
        }
        // Returns the first vacant slot in this chunk, or null if the chunk is full
        Item* findFreeSlot()
        {
            for (unsigned i = 0; i < static_cast<unsigned>(NumItems); i++)
            {
                if (!items[i].isConstructed())
                {
                    return items + i;
                }
            }
            return UAVCAN_NULLPTR;
        }
    };
    /*
     * Data
     */
    LinkedListRoot<Chunk> list_;    // Singly-linked list of storage chunks
    IPoolAllocator& allocator_;     // Pool from which chunks are allocated
    /*
     * Methods
     */
    // Returns a vacant slot, allocating a new chunk if all existing ones are full; null on OOM
    Item* findOrCreateFreeSlot();
    // Releases chunks that contain no constructed items
    void compact();
    enum RemoveStrategy { RemoveOne, RemoveAll };
    // Shared implementation behind removeFirstWhere()/removeAllWhere()
    template <typename Predicate>
    void removeWhere(Predicate predicate, RemoveStrategy strategy);
    // Predicate matching every item; used by isEmpty() and clear()
    struct YesPredicate
    {
        bool operator()(const T&) const { return true; }
    };
    // Predicate that fires on the item at a given position; decrements on every probe
    struct IndexPredicate : ::uavcan::Noncopyable
    {
        unsigned index;
        IndexPredicate(unsigned target_index)
            : index(target_index)
        { }
        bool operator()(const T&)
        {
            return index-- == 0;
        }
    };
    // Predicate matching items that compare equal to a reference item
    struct ComparingPredicate
    {
        const T& reference;
        ComparingPredicate(const T& ref)
            : reference(ref)
        { }
        bool operator()(const T& sample)
        {
            return reference == sample;
        }
    };
    // Wraps a void-returning operator so it can be driven by find(); always returns false
    // so the traversal visits every item
    template<typename Operator>
    struct OperatorToFalsePredicateAdapter : ::uavcan::Noncopyable
    {
        Operator oper;
        OperatorToFalsePredicateAdapter(Operator o)
            : oper(o)
        { }
        bool operator()(T& item)
        {
            oper(item);
            return false;
        }
        bool operator()(const T& item) const
        {
            oper(item);
            return false;
        }
    };
public:
    Multiset(IPoolAllocator& allocator)
        : allocator_(allocator)
    { }
    ~Multiset()
    {
        clear();
    }
    /**
     * Creates one item in-place and returns a pointer to it.
     * If creation fails due to lack of memory, UAVCAN_NULLPTR will be returned.
     * Complexity is O(N).
     */
    T* emplace()
    {
        Item* const item = findOrCreateFreeSlot();
        if (item == UAVCAN_NULLPTR)
        {
            return UAVCAN_NULLPTR;
        }
        UAVCAN_ASSERT(item->ptr == UAVCAN_NULLPTR);
        item->ptr = new (item->pool) T();
        return item->ptr;
    }
    // One-argument overload; the parameter is forwarded by value to T's constructor
    template <typename P1>
    T* emplace(P1 p1)
    {
        Item* const item = findOrCreateFreeSlot();
        if (item == UAVCAN_NULLPTR)
        {
            return UAVCAN_NULLPTR;
        }
        UAVCAN_ASSERT(item->ptr == UAVCAN_NULLPTR);
        item->ptr = new (item->pool) T(p1);
        return item->ptr;
    }
    // Two-argument overload
    template <typename P1, typename P2>
    T* emplace(P1 p1, P2 p2)
    {
        Item* const item = findOrCreateFreeSlot();
        if (item == UAVCAN_NULLPTR)
        {
            return UAVCAN_NULLPTR;
        }
        UAVCAN_ASSERT(item->ptr == UAVCAN_NULLPTR);
        item->ptr = new (item->pool) T(p1, p2);
        return item->ptr;
    }
    // Three-argument overload
    template <typename P1, typename P2, typename P3>
    T* emplace(P1 p1, P2 p2, P3 p3)
    {
        Item* const item = findOrCreateFreeSlot();
        if (item == UAVCAN_NULLPTR)
        {
            return UAVCAN_NULLPTR;
        }
        UAVCAN_ASSERT(item->ptr == UAVCAN_NULLPTR);
        item->ptr = new (item->pool) T(p1, p2, p3);
        return item->ptr;
    }
    /**
     * Removes entries where the predicate returns true.
     * Predicate prototype:
     *  bool (T& item)
     */
    template <typename Predicate>
    void removeAllWhere(Predicate predicate) { removeWhere<Predicate>(predicate, RemoveAll); }
    template <typename Predicate>
    void removeFirstWhere(Predicate predicate) { removeWhere<Predicate>(predicate, RemoveOne); }
    // Equality-based removal helpers (T must be comparable with operator==)
    void removeFirst(const T& ref) { removeFirstWhere(ComparingPredicate(ref)); }
    void removeAll(const T& ref) { removeAllWhere(ComparingPredicate(ref)); }
    void clear() { removeAllWhere(YesPredicate()); }
    /**
     * Returns first entry where the predicate returns true.
     * Predicate prototype:
     *  bool (const T& item)
     */
    template <typename Predicate>
    T* find(Predicate predicate);
    template <typename Predicate>
    const T* find(Predicate predicate) const
    {
        return const_cast<Multiset*>(this)->find<Predicate>(predicate);
    }
    /**
     * Calls Operator for each item of the set.
     * Operator prototype:
     *  void (T& item)
     *  void (const T& item) - const overload
     */
    template <typename Operator>
    void forEach(Operator oper)
    {
        OperatorToFalsePredicateAdapter<Operator> adapter(oper);
        (void)find<OperatorToFalsePredicateAdapter<Operator>&>(adapter);
    }
    template <typename Operator>
    void forEach(Operator oper) const
    {
        const OperatorToFalsePredicateAdapter<Operator> adapter(oper);
        (void)find<const OperatorToFalsePredicateAdapter<Operator>&>(adapter);
    }
    /**
     * Returns an item located at the specified position from the beginning.
     * Note that addition and removal operations invalidate indices.
     * If index is greater than or equal the number of items, null pointer will be returned.
     * Complexity is O(N).
     */
    T* getByIndex(unsigned index)
    {
        IndexPredicate predicate(index);
        return find<IndexPredicate&>(predicate);
    }
    const T* getByIndex(unsigned index) const
    {
        return const_cast<Multiset*>(this)->getByIndex(index);
    }
    /**
     * Complexity is O(1).
     */
    bool isEmpty() const { return find(YesPredicate()) == UAVCAN_NULLPTR; }
    /**
     * Counts number of items stored.
     * Best case complexity is O(N).
     */
    unsigned getSize() const;
};
// ----------------------------------------------------------------------------
/*
* Multiset<>
*/
template <typename T>
typename Multiset<T>::Item* Multiset<T>::findOrCreateFreeSlot()
{
    // First pass: look for a vacant slot in the chunks already owned
    for (Chunk* chunk = list_.get(); chunk != UAVCAN_NULLPTR; chunk = chunk->getNextListNode())
    {
        Item* const slot = chunk->findFreeSlot();
        if (slot != UAVCAN_NULLPTR)
        {
            return slot;
        }
    }
    // No vacancy anywhere - grow the storage by one pool block
    Chunk* const fresh = Chunk::instantiate(allocator_);
    if (fresh == UAVCAN_NULLPTR)
    {
        return UAVCAN_NULLPTR;      // Pool exhausted
    }
    list_.insert(fresh);
    return &fresh->items[0];        // A brand-new chunk is entirely vacant
}
template <typename T>
void Multiset<T>::compact()
{
    // Return every fully-vacant chunk back to the pool allocator
    Chunk* chunk = list_.get();
    while (chunk != UAVCAN_NULLPTR)
    {
        Chunk* const next = chunk->getNextListNode();   // Saved before possible destruction
        bool any_constructed = false;
        for (int i = 0; i < Chunk::NumItems; i++)
        {
            if (chunk->items[i].isConstructed())
            {
                any_constructed = true;     // Occupied slot found - keep this chunk
                break;
            }
        }
        if (!any_constructed)
        {
            list_.remove(chunk);
            Chunk::destroy(chunk, allocator_);
        }
        chunk = next;
    }
}
template <typename T>
template <typename Predicate>
void Multiset<T>::removeWhere(Predicate predicate, const RemoveStrategy strategy)
{
    unsigned num_removed = 0;
    Chunk* chunk = list_.get();
    while (chunk != UAVCAN_NULLPTR)
    {
        Chunk* const next = chunk->getNextListNode();   // The current chunk may be emptied below
        if ((strategy == RemoveOne) && (num_removed > 0))
        {
            break;      // Single-removal mode and one item has already been removed
        }
        for (int i = 0; i < Chunk::NumItems; i++)
        {
            Item& slot = chunk->items[i];
            if (slot.isConstructed() && predicate(*slot.ptr))
            {
                num_removed++;
                slot.destroy();
                if (strategy == RemoveOne)
                {
                    break;
                }
            }
        }
        chunk = next;
    }
    if (num_removed > 0)
    {
        compact();      // Free chunks that became entirely vacant
    }
}
template <typename T>
template <typename Predicate>
T* Multiset<T>::find(Predicate predicate)
{
    Chunk* chunk = list_.get();
    while (chunk != UAVCAN_NULLPTR)
    {
        Chunk* const next = chunk->getNextListNode();   // Saved in case the predicate modifies the set
        for (int i = 0; i < Chunk::NumItems; i++)
        {
            Item& slot = chunk->items[i];
            if (slot.isConstructed() && predicate(*slot.ptr))
            {
                return slot.ptr;
            }
        }
        chunk = next;
    }
    return UAVCAN_NULLPTR;      // No item matched
}
template <typename T>
unsigned Multiset<T>::getSize() const
{
    // Count constructed slots across every chunk
    unsigned count = 0;
    for (Chunk* chunk = list_.get(); chunk != UAVCAN_NULLPTR; chunk = chunk->getNextListNode())
    {
        for (int i = 0; i < Chunk::NumItems; i++)
        {
            if (chunk->items[i].isConstructed())
            {
                count++;
            }
        }
    }
    return count;
}
}
#endif // Include guard

Some files were not shown because too many files have changed in this diff Show More