Bulk Read/Write API to support different addresses/parameter lengths #21

Merged · 2 commits · Feb 6, 2023
10 changes: 7 additions & 3 deletions examples/example-03-bulk-read/example-03-bulk-read.cpp
@@ -58,10 +58,14 @@ int main(int argc, char **argv) try
/* Read the current angle from all those servos
* and print it to std::cout.
*/
std::map<Dynamixel::Id, uint32_t> position_map;
Dynamixel::BulkReadDataMap position_map;
try
{
position_map = dynamixel_ctrl.bulkRead<uint32_t>(MX28_ControlTable_PresentPosition, id_vect);
Dynamixel::BulkReadRequestVect bulk_read_req;
for (auto id : id_vect)
bulk_read_req.push_back(std::make_tuple(id, MX28_ControlTable_PresentPosition, sizeof(uint32_t)));

position_map = dynamixel_ctrl.bulkRead(bulk_read_req);
}
catch (dynamixelplusplus::HardwareAlert const & e)
{
@@ -74,7 +78,7 @@ int main(int argc, char **argv) try

for (auto [id, position_raw] : position_map)
{
float const position_deg = static_cast<float>(position_raw) * 360.0f / 4096;
float const position_deg = static_cast<float>(std::get<uint32_t>(position_raw)) * 360.0f / 4096;
std::cout << "Dynamixel MX28 servo #"
<< static_cast<int>(id)
<< ": "
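The change above is a straight port of the old fixed-width call, but the new request vector also allows mixed register widths within a single transaction, which is the point of this PR. A minimal sketch under that assumption (the servo IDs 1 and 2 are placeholders; MX28_ControlTable_LED is the constant used in example-06 below):

Dynamixel::BulkReadRequestVect req;
req.push_back(std::make_tuple(Dynamixel::Id(1), MX28_ControlTable_PresentPosition, sizeof(uint32_t))); /* 4-byte register */
req.push_back(std::make_tuple(Dynamixel::Id(2), MX28_ControlTable_LED, sizeof(uint8_t)));              /* 1-byte register */

Dynamixel::BulkReadDataMap const data = dynamixel_ctrl.bulkRead(req);
/* The active variant alternative matches the requested length; entries only
 * exist for servos whose data was actually available.
 */
uint32_t const position = std::get<uint32_t>(data.at(1));
uint8_t  const led      = std::get<uint8_t>(data.at(2));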
14 changes: 8 additions & 6 deletions examples/example-06-bulk-write/example-06-bulk-write.cpp
@@ -107,20 +107,22 @@ catch (dynamixelplusplus::StatusError const & e)

void turnLedOn(Dynamixel & dynamixel_ctrl, Dynamixel::IdVect const & id_vect)
{
std::map<Dynamixel::Id, uint8_t> led_on_data_map;
Dynamixel::BulkWriteDataVect led_on_data_vect;
uint8_t const LED_ON_VAL = 1;

for (auto id : id_vect)
led_on_data_map[id] = 1;
led_on_data_vect.push_back(std::make_tuple(id, MX28_ControlTable_LED, LED_ON_VAL));

dynamixel_ctrl.bulkWrite(MX28_ControlTable_LED, led_on_data_map);
dynamixel_ctrl.bulkWrite(led_on_data_vect);
}

void turnLedOff(Dynamixel & dynamixel_ctrl, Dynamixel::IdVect const & id_vect)
{
std::map<Dynamixel::Id, uint8_t> led_off_data_map;
Dynamixel::BulkWriteDataVect led_off_data_vect;
uint8_t const LED_OFF_VAL = 0;

for (auto id : id_vect)
led_off_data_map[id] = 0;
led_off_data_vect.push_back(std::make_tuple(id, MX28_ControlTable_LED, LED_OFF_VAL));

dynamixel_ctrl.bulkWrite(MX28_ControlTable_LED, led_off_data_map);
dynamixel_ctrl.bulkWrite(led_off_data_vect);
}
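As with the read path, the tuple-based bulkWrite can now target different registers and widths per servo in one call. A sketch under that assumption; MX28_ControlTable_GoalPosition is a hypothetical constant used only for illustration, not part of this diff:

Dynamixel::BulkWriteDataVect data;
data.push_back(std::make_tuple(Dynamixel::Id(1), MX28_ControlTable_LED, static_cast<uint8_t>(1)));              /* 1-byte write */
data.push_back(std::make_tuple(Dynamixel::Id(2), MX28_ControlTable_GoalPosition, static_cast<uint32_t>(2048))); /* 4-byte write, hypothetical register */
dynamixel_ctrl.bulkWrite(data);

The explicit casts matter: the active alternative of the std::variant determines the data length written on the bus.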
18 changes: 10 additions & 8 deletions include/dynamixel++/Dynamixel.hpp
@@ -18,6 +18,7 @@
#include <vector>
#include <memory>
#include <optional>
#include <variant>

#include <dynamixel_sdk.h>

@@ -75,17 +76,23 @@ class Dynamixel
typedef uint8_t Id;
typedef std::vector<Id> IdVect;


IdVect broadcastPing();

void reboot(Id const id);


typedef std::map<Id, std::variant<uint8_t, uint16_t, uint32_t>> BulkReadDataMap;
typedef std::vector<std::tuple<Id, uint16_t, size_t>> BulkReadRequestVect;
BulkReadDataMap bulkRead(BulkReadRequestVect const & bulk_read_req);

typedef std::vector<std::tuple<Id, uint16_t, std::variant<uint8_t, uint16_t, uint32_t>>> BulkWriteDataVect;
void bulkWrite(BulkWriteDataVect const & bulk_write_data);


template<typename T> T read (uint16_t const start_address, Id const id);
template<typename T> std::map<Id, T> bulkRead(uint16_t const start_address, IdVect const & id_vect);
template<typename T> std::map<Id, T> syncRead(uint16_t const start_address, IdVect const & id_vect);

template<typename T> void write (uint16_t const start_address, Id const id, T const val);
template<typename T> void bulkWrite(uint16_t const start_address, std::map<Id, T> const & val_map);
template<typename T> void syncWrite(uint16_t const start_address, std::map<Id, T> const & val_map);


@@ -96,18 +103,13 @@ class Dynamixel
typedef std::tuple<Id, uint8_t *> SyncWriteData;
typedef std::vector<SyncWriteData> SyncWriteDataVect;
void syncWrite(uint16_t const start_address, uint16_t const data_length, SyncWriteDataVect const & data);
typedef SyncWriteData BulkWriteData;
typedef SyncWriteDataVect BulkWriteDataVect;
void bulkWrite(uint16_t const start_address, uint16_t const data_length, BulkWriteDataVect const & data);

typedef std::vector<std::tuple<
Id,
std::optional<uint32_t>
>
> SyncReadDataVect;
typedef SyncReadDataVect BulkReadDataVect;
SyncReadDataVect syncRead(uint16_t const start_address, uint16_t const data_length, IdVect const & id_vect);
BulkReadDataVect bulkRead(uint16_t const start_address, uint16_t const data_length, IdVect const & id_vect);
};

/**************************************************************************************
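Because BulkReadDataMap stores std::variant values, a caller that does not hard-code the register width can dispatch generically instead of calling std::get with a fixed type. A sketch reusing position_map from example-03 above (not part of this diff):

for (auto const & [id, value] : position_map)
{
  /* Convert whichever alternative is active to a common width for printing. */
  uint32_t const raw = std::visit([](auto v) { return static_cast<uint32_t>(v); }, value);
  std::cout << "servo #" << static_cast<int>(id) << ": " << raw << std::endl;
}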
52 changes: 0 additions & 52 deletions include/dynamixel++/Dynamixel.ipp
@@ -71,24 +71,6 @@ template<typename T> T Dynamixel::read(uint16_t const start_address, Id const id
return val;
}

template<typename T> std::map<Dynamixel::Id, T> Dynamixel::bulkRead(uint16_t const start_address, IdVect const & id_vect)
{
static_assert(std::is_same<T, uint8_t>::value ||
std::is_same<T, uint16_t>::value ||
std::is_same<T, uint32_t>::value, "Only uint8_t, uint16_t and uint32_t are allowed parameters.");

auto bulk_read_data_vect = bulkRead(start_address, sizeof(T), id_vect);

std::map<Id, T> val_map;
for (auto [id, opt_data] : bulk_read_data_vect)
{
if (opt_data.has_value())
val_map[id] = static_cast<T>(opt_data.value());
}

return val_map;
}

template<typename T> std::map<Dynamixel::Id, T> Dynamixel::syncRead(uint16_t const start_address, IdVect const & id_vect)
{
static_assert(std::is_same<T, uint8_t>::value ||
@@ -125,40 +107,6 @@ template<typename T> void Dynamixel::write(uint16_t const start_address, Id cons
throw StatusError(_packet_handler.get(), error & 0x7F);
}

template<typename T> void Dynamixel::bulkWrite(uint16_t const start_address, std::map<Id, T> const & val_map)
{
static_assert(std::is_same<T, uint8_t>::value ||
std::is_same<T, uint16_t>::value ||
std::is_same<T, uint32_t>::value, "Only uint8_t, uint16_t and uint32_t are allowed parameters.");

/* Convert the functions input data into the required
* format to feed to the Dynamixel SDK.
*/
std::vector<Id> id_vect;
std::vector<T> value_vect;
for (auto [id, val] : val_map)
{
id_vect.push_back(id);
value_vect.push_back(val);
}

/* This 2-step dance is necessary because we need to pass a pointer
* to a local variable which still needs to exist in scope until
* bulkWrite has been fully executed.
*/
BulkWriteDataVect data_vect;
for (size_t i = 0; i < id_vect.size(); i++)
{
BulkWriteData const d = std::make_tuple(id_vect[i], reinterpret_cast<uint8_t *>(&value_vect[i]));
data_vect.push_back(d);
}

/* Call the actual sync write API invoking the underlying
* DynamixelSDK sync write API.
*/
bulkWrite(start_address, sizeof(T), data_vect);
}

template<typename T> void Dynamixel::syncWrite(uint16_t const start_address, std::map<Id, T> const & val_map)
{
static_assert(std::is_same<T, uint8_t>::value ||
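Callers of the removed templated bulkRead<T>/bulkWrite<T> overloads can reproduce the uniform-width behaviour on top of the new request-based API. A hypothetical migration helper, not part of this diff:

template<typename T>
std::map<Dynamixel::Id, T> bulkReadUniform(Dynamixel & ctrl, uint16_t const start_address, Dynamixel::IdVect const & id_vect)
{
  static_assert(std::is_same<T, uint8_t>::value ||
                std::is_same<T, uint16_t>::value ||
                std::is_same<T, uint32_t>::value, "Only uint8_t, uint16_t and uint32_t are allowed parameters.");

  /* Build one (id, address, length) request per servo, all with the same width. */
  Dynamixel::BulkReadRequestVect req;
  for (auto id : id_vect)
    req.push_back(std::make_tuple(id, start_address, sizeof(T)));

  std::map<Dynamixel::Id, T> val_map;
  for (auto const & [id, value] : ctrl.bulkRead(req))
    val_map[id] = std::get<T>(value);

  return val_map;
}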
136 changes: 83 additions & 53 deletions src/Dynamixel.cpp
@@ -72,30 +72,81 @@ void Dynamixel::reboot(Id const id)
throw StatusError(_packet_handler.get(), error & 0x7F);
}

/**************************************************************************************
* PRIVATE MEMBER FUNCTIONS
**************************************************************************************/

void Dynamixel::syncWrite(uint16_t const start_address, uint16_t const data_length, SyncWriteDataVect const & data)
Dynamixel::BulkReadDataMap Dynamixel::bulkRead(BulkReadRequestVect const & bulk_read_req)
{
dynamixel::GroupSyncWrite group_sync_write(_port_handler.get(), _packet_handler.get(), start_address, data_length);
BulkReadDataMap data_map;

for(auto [id, data_ptr] : data)
group_sync_write.addParam(id, data_ptr);
dynamixel::GroupBulkRead group_bulk_read(_port_handler.get(), _packet_handler.get());

if (int const res = group_sync_write.txPacket();
res != COMM_SUCCESS) {
for(auto [id, start_address, data_length] : bulk_read_req)
group_bulk_read.addParam(id, start_address, data_length);

if (int const res = group_bulk_read.txRxPacket();
res != COMM_SUCCESS) {
throw CommunicationError(_packet_handler.get(), res);
}
group_sync_write.clearParam();

for(auto [id, start_address, data_length] : bulk_read_req)
{
uint8_t dxl_error = 0;

if (group_bulk_read.getError(id, &dxl_error))
{
if (dxl_error & 0x80)
throw HardwareAlert(id);
else if(dxl_error & 0x7F)
throw StatusError(_packet_handler.get(), dxl_error & 0x7F);
}

if (group_bulk_read.isAvailable(id, start_address, data_length))
{
if (data_length == 1)
data_map[id] = static_cast<uint8_t>(group_bulk_read.getData(id, start_address, data_length));
else if (data_length == 2)
data_map[id] = static_cast<uint16_t>(group_bulk_read.getData(id, start_address, data_length));
else if (data_length == 4)
data_map[id] = static_cast<uint32_t>(group_bulk_read.getData(id, start_address, data_length));
}
}

group_bulk_read.clearParam();

return data_map;
}

void Dynamixel::bulkWrite(uint16_t const start_address, uint16_t const data_length, BulkWriteDataVect const & data)
void Dynamixel::bulkWrite(BulkWriteDataVect const & bulk_write_data)
{
dynamixel::GroupBulkWrite group_bulk_write(_port_handler.get(), _packet_handler.get());

for(auto [id, data_ptr] : data)
group_bulk_write.addParam(id, start_address, data_length, data_ptr);
/* Unfortunately we need to copy and hold the data until the bulk write
* has been completed. This works best by adding it temporarily to some
* std::vector.
*/
std::vector<std::shared_ptr<uint8_t>> tmp_val_uint8_t;
std::vector<std::shared_ptr<uint16_t>> tmp_val_uint16_t;
std::vector<std::shared_ptr<uint32_t>> tmp_val_uint32_t;

for(auto [id, start_address, value] : bulk_write_data)
{
if (std::holds_alternative<uint8_t>(value))
{
auto tmp_val = std::make_shared<uint8_t>(std::get<uint8_t>(value));
tmp_val_uint8_t.push_back(tmp_val);
group_bulk_write.addParam(id, start_address, sizeof(uint8_t), tmp_val.get());
}
else if (std::holds_alternative<uint16_t>(value))
{
auto tmp_val = std::make_shared<uint16_t>(std::get<uint16_t>(value));
tmp_val_uint16_t.push_back(tmp_val);
group_bulk_write.addParam(id, start_address, sizeof(uint16_t), reinterpret_cast<uint8_t *>(tmp_val.get()));
}
else if (std::holds_alternative<uint32_t>(value))
{
auto tmp_val = std::make_shared<uint32_t>(std::get<uint32_t>(value));
tmp_val_uint32_t.push_back(tmp_val);
group_bulk_write.addParam(id, start_address, sizeof(uint32_t), reinterpret_cast<uint8_t *>(tmp_val.get()));
}
}

if (int const res = group_bulk_write.txPacket();
res != COMM_SUCCESS) {
@@ -104,6 +155,24 @@ void Dynamixel::bulkWrite(uint16_t const start_address, uint16_t const data_leng
group_bulk_write.clearParam();
}

/**************************************************************************************
* PRIVATE MEMBER FUNCTIONS
**************************************************************************************/

void Dynamixel::syncWrite(uint16_t const start_address, uint16_t const data_length, SyncWriteDataVect const & data)
{
dynamixel::GroupSyncWrite group_sync_write(_port_handler.get(), _packet_handler.get(), start_address, data_length);

for(auto [id, data_ptr] : data)
group_sync_write.addParam(id, data_ptr);

if (int const res = group_sync_write.txPacket();
res != COMM_SUCCESS) {
throw CommunicationError(_packet_handler.get(), res);
}
group_sync_write.clearParam();
}

Dynamixel::SyncReadDataVect Dynamixel::syncRead(uint16_t const start_address, uint16_t const data_length, IdVect const & id_vect)
{
SyncReadDataVect data_vect;
@@ -143,45 +212,6 @@ Dynamixel::SyncReadDataVect Dynamixel::syncRead(uint16_t const start_address, ui
return data_vect;
}

Dynamixel::BulkReadDataVect Dynamixel::bulkRead(uint16_t const start_address, uint16_t const data_length, IdVect const & id_vect)
{
BulkReadDataVect data_vect;

dynamixel::GroupBulkRead group_bulk_read(_port_handler.get(), _packet_handler.get());

for(auto id : id_vect)
group_bulk_read.addParam(id, start_address, data_length);

if (int const res = group_bulk_read.txRxPacket();
res != COMM_SUCCESS) {
throw CommunicationError(_packet_handler.get(), res);
}

for(auto id : id_vect)
{
uint8_t dxl_error = 0;
if (group_bulk_read.getError(id, &dxl_error))
{
if (dxl_error & 0x80)
throw HardwareAlert(id);
else if(dxl_error & 0x7F)
throw StatusError(_packet_handler.get(), dxl_error & 0x7F);
}
}

for(auto id : id_vect)
{
if (group_bulk_read.isAvailable(id, start_address, data_length))
data_vect.push_back(std::make_tuple(id, group_bulk_read.getData(id, start_address, data_length)));
else
data_vect.push_back(std::make_tuple(id, std::nullopt));
}

group_bulk_read.clearParam();

return data_vect;
}

/**************************************************************************************
* NAMESPACE
**************************************************************************************/