rclnodejs 1.2.0 → 1.4.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (41)
  1. package/README.md +1 -1
  2. package/binding.gyp +1 -0
  3. package/index.js +26 -3
  4. package/lib/context.js +4 -2
  5. package/lib/lifecycle.js +9 -0
  6. package/lib/node.js +106 -18
  7. package/lib/serialization.js +60 -0
  8. package/lib/type_description_service.js +27 -1
  9. package/package.json +3 -1
  10. package/rosidl_convertor/README.md +298 -0
  11. package/rosidl_convertor/idl_convertor.js +49 -0
  12. package/rosidl_convertor/idl_convertor.py +1176 -0
  13. package/rosidl_gen/generator.json +2 -2
  14. package/rosidl_gen/index.js +21 -4
  15. package/rosidl_gen/packages.js +65 -32
  16. package/rosidl_gen/templates/message.dot +1 -1
  17. package/scripts/npmjs-readme.md +1 -1
  18. package/src/addon.cpp +2 -0
  19. package/src/macros.h +17 -1
  20. package/src/rcl_action_client_bindings.cpp +3 -2
  21. package/src/rcl_action_goal_bindings.cpp +3 -2
  22. package/src/rcl_action_server_bindings.cpp +7 -5
  23. package/src/rcl_client_bindings.cpp +4 -3
  24. package/src/rcl_context_bindings.cpp +29 -19
  25. package/src/rcl_guard_condition_bindings.cpp +3 -2
  26. package/src/rcl_lifecycle_bindings.cpp +18 -4
  27. package/src/rcl_node_bindings.cpp +105 -3
  28. package/src/rcl_publisher_bindings.cpp +4 -3
  29. package/src/rcl_serialization_bindings.cpp +116 -0
  30. package/src/rcl_serialization_bindings.h +26 -0
  31. package/src/rcl_service_bindings.cpp +4 -3
  32. package/src/rcl_subscription_bindings.cpp +3 -2
  33. package/src/rcl_time_point_bindings.cpp +3 -2
  34. package/src/rcl_timer_bindings.cpp +8 -6
  35. package/src/rcl_utilities.cpp +31 -0
  36. package/src/rcl_utilities.h +7 -0
  37. package/tsconfig.json +2 -2
  38. package/types/context.d.ts +3 -2
  39. package/types/index.d.ts +26 -1
  40. package/types/lifecycle.d.ts +7 -0
  41. package/types/node.d.ts +48 -8
package/rosidl_convertor/idl_convertor.py
@@ -0,0 +1,1176 @@
+ #!/usr/bin/env python3
+
+ # Copyright (c) 2025, The Robot Web Tools Contributors
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ """
+ IDL to ROS2 Interface Converter
+
+ This tool converts ROS2 .idl files to corresponding .msg/.srv/.action files.
+ It parses IDL syntax and generates proper ROS2 interface definitions.
+ """
+
+ import os
+ import sys
+ import re
+ import pathlib
+ import argparse
+ from typing import List, Dict, Optional
+ from dataclasses import dataclass
+ from enum import Enum
+
+
+ class IdlElementType(Enum):
+     MESSAGE = "message"
+     SERVICE = "service"
+     ACTION = "action"
+
+
+ @dataclass
+ class IdlField:
+     """Represents a field in an IDL structure"""
+     field_type: str
+     name: str
+     is_array: bool = False
+     array_size: Optional[int] = None
+     is_sequence: bool = False
+     is_bounded_string: bool = False
+     default_value: Optional[str] = None
+     comment: Optional[str] = None
+
+
+ @dataclass
+ class IdlConstant:
+     """Represents a constant definition in an IDL structure"""
+     name: str
+     const_type: str
+     value: str
+
+
+ @dataclass
+ class IdlStructure:
+     """Represents an IDL structure (message, service part, etc.)"""
+     name: str
+     fields: List[IdlField]
+     constants: List[IdlConstant]
+     comments: List[str]
+
+
+ @dataclass
+ class IdlInterface:
+     """Represents a complete IDL interface definition"""
+     name: str
+     interface_type: IdlElementType
+     package: str
+     structures: List[IdlStructure]  # For messages: 1 structure, services: 2 (request/response), actions: 3 (goal/result/feedback)
+
+
+ class IdlParser:
+     """Parser for IDL files"""
+
+     # Type mapping from IDL to ROS2
+     TYPE_MAPPING = {
+         'boolean': 'bool',
+         'octet': 'byte',  # IDL octet maps to ROS2 byte
+         'char': 'char',
+         'wchar': 'wchar',
+         'int8': 'int8',
+         'uint8': 'uint8',
+         'int16': 'int16',
+         'uint16': 'uint16',
+         'int32': 'int32',
+         'uint32': 'uint32',
+         'int64': 'int64',
+         'uint64': 'uint64',
+         'float': 'float32',
+         'double': 'float64',
+         'long double': 'float64',
+         'string': 'string',
+         'wstring': 'wstring',
+     }
+
+     def __init__(self):
+         self.includes = []
+         self.current_package = ""
+         self.current_module = ""
+         self.typedefs = {}  # Store typedef declarations
+
+     def _contains_key_annotations(self, content: str) -> bool:
+         """Check if IDL content contains @key annotations or references to keyed types (not supported in ROS2 .msg)"""
+         # Direct @key annotations
+         if '@key' in content:
+             return True
+
+         # Check for references to known keyed types
+         keyed_type_patterns = [
+             r'test_msgs::msg::KeyedString',
+             r'test_msgs::msg::KeyedLong',
+             r'KeyedString',
+             r'KeyedLong'
+         ]
+
+         import re
+         for pattern in keyed_type_patterns:
+             if re.search(pattern, content):
+                 return True
+
+         return False
+
+     def parse_file(self, idl_file_path: str) -> List[IdlInterface]:
+         """Parse an IDL file and return list of interfaces"""
+         with open(idl_file_path, 'r') as f:
+             content = f.read()
+
+         return self.parse_content(content, idl_file_path)
+
+     def parse_content(self, content: str, file_path: str = "") -> List[IdlInterface]:
+         """Parse IDL content string"""
+         interfaces = []
+
+         # Check for unsupported features
+         if self._contains_key_annotations(content):
+             # Determine the specific reason for skipping
+             if '@key' in content:
+                 reason = "contains @key annotations"
+             else:
+                 reason = "references keyed types"
+             print(f"Warning: Skipping {file_path} - {reason} which are not supported in ROS2 .msg files")
+             return interfaces
+
+         # Extract modules and their contents BEFORE preprocessing (to preserve @verbatim)
+         modules = self._extract_modules(content)
+
+         for module_info in modules:
+             module_name = module_info['name']
+             module_content = module_info['content']
+
+             # Set current package for type mapping
+             self.current_package = module_name.split('::')[0]
+
+             # Parse typedefs FIRST from raw content (before preprocessing removes them)
+             self._parse_typedefs(module_content)
+
+             # Parse structures (to extract verbatim comments before preprocessing)
+             structures = self._parse_structures(module_content)
+
+             # Now preprocess the content to remove comments and normalize
+             clean_content = self._preprocess_content(module_content)
+
+             # Parse constants from nested modules from clean content
+             constants = self._parse_constants_from_modules(clean_content)
+
+             # Add constants to the main structure (if any structures exist)
+             if structures and constants:
+                 # Add constants to the first structure (typically the main message structure)
+                 structures[0].constants.extend(constants)
+
+             for struct in structures:
+                 # Determine interface type based on naming convention or structure
+                 interface_type = self._determine_interface_type(struct, file_path)
+
+                 interface = IdlInterface(
+                     name=struct.name,
+                     interface_type=interface_type,
+                     package=module_name,
+                     structures=[struct]
+                 )
+                 interfaces.append(interface)
+
+         return interfaces
+
+     def _preprocess_content(self, content: str) -> str:
+         """Remove comments and normalize whitespace"""
+         # Remove @verbatim blocks using a more robust approach
+         # Find and remove complete @verbatim blocks that may span multiple lines
+         lines = content.split('\n')
+         processed_lines = []
+         in_verbatim = False
+         paren_count = 0
+
+         for line in lines:
+             if '@verbatim' in line and not in_verbatim:
+                 in_verbatim = True
+                 paren_count = line.count('(') - line.count(')')
+                 continue
+             elif in_verbatim:
+                 paren_count += line.count('(') - line.count(')')
+                 if paren_count <= 0:
+                     in_verbatim = False
+                 continue
+
+             # Remove regular comments
+             if '//' in line:
+                 line = line[:line.index('//')]
+
+             processed_lines.append(line)
+
+         content = '\n'.join(processed_lines)
+
+         # Remove multi-line comments
+         content = re.sub(r'/\*.*?\*/', '', content, flags=re.DOTALL)
+
+         return content
+
+     def _extract_modules(self, content: str) -> List[Dict]:
+         """Extract module definitions from content"""
+         modules = []
+
+         # Find module blocks - improved pattern for better nested module handling
+         # This pattern will match modules even with complex nested structures
+         pos = 0
+         while True:
+             # Find next module declaration
+             module_match = re.search(r'module\s+(\w+)\s*\{', content[pos:])
+             if not module_match:
+                 break
+
+             module_name = module_match.group(1)
+             start_pos = pos + module_match.end() - 1  # Position of opening brace
+
+             # Count braces to find the matching closing brace
+             brace_count = 1
+             current_pos = start_pos + 1
+
+             while current_pos < len(content) and brace_count > 0:
+                 if content[current_pos] == '{':
+                     brace_count += 1
+                 elif content[current_pos] == '}':
+                     brace_count -= 1
+                 current_pos += 1
+
+             if brace_count == 0:
+                 # Found the matching closing brace
+                 module_content = content[start_pos + 1:current_pos - 1]
+
+                 # Handle nested modules recursively
+                 nested_modules = self._extract_modules(module_content)
+                 if nested_modules:
+                     for nested in nested_modules:
+                         nested['name'] = f"{module_name}::{nested['name']}"
+                         modules.append(nested)
+
+                 # Always add the current module as well
+                 modules.append({
+                     'name': module_name,
+                     'content': module_content
+                 })
+
+                 pos = current_pos
+             else:
+                 # Unmatched braces, skip this occurrence
+                 pos = pos + module_match.end()
+
+         return modules
+
+     def _parse_typedefs(self, content: str):
+         """Parse typedef declarations from module content"""
+         # First pass: find simple typedefs like: typedef test_msgs::msg::Arrays test_msgs__msg__Arrays;
+         simple_typedef_pattern = r'typedef\s+([^;\s]+)\s+([^;\[\s]+)\s*;'
+         matches = re.finditer(simple_typedef_pattern, content, re.DOTALL)
+
+         for match in matches:
+             source_type = match.group(1).strip()
+             target_name = match.group(2).strip()
+
+             # Skip if this is actually an array typedef (contains [])
+             if '[' not in match.group(0):
+                 # Map the source type and store the simple typedef
+                 ros_source_type = self._map_type(source_type)
+                 self.typedefs[target_name] = {
+                     'base_type': ros_source_type,
+                     'array_size': None
+                 }
+
+         # Second pass: find array typedefs like: typedef double double__9[9];
+         array_typedef_pattern = r'typedef\s+([^[\s]+)\s+(\w+)\[(\d+)\]\s*;'
+         matches = re.finditer(array_typedef_pattern, content, re.DOTALL)
+
+         for match in matches:
+             base_type = match.group(1)
+             typedef_name = match.group(2)
+             array_size = int(match.group(3))
+
+             # Map the base type and store the typedef
+             ros_base_type = self._map_type(base_type)
+             self.typedefs[typedef_name] = {
+                 'base_type': ros_base_type,
+                 'array_size': array_size
+             }
+
+     def _parse_constants_from_modules(self, content: str) -> List[IdlConstant]:
+         """Parse constants from nested constant modules"""
+         constants = []
+
+         # Find constant modules like: module SomeConstants { const uint8 NAME = VALUE; };
+         const_module_pattern = r'module\s+(\w*[Cc]onstants?\w*)\s*\{([^{}]*(?:\{[^{}]*\}[^{}]*)*)\}'
+         matches = re.finditer(const_module_pattern, content, re.DOTALL)
+
+         for match in matches:
+             module_name = match.group(1)
+             module_content = match.group(2)
+
+             # Find const declarations within the module
+             const_pattern = r'const\s+(\w+)\s+(\w+)\s*=\s*([^;]+);'
+             const_matches = re.finditer(const_pattern, module_content)
+
+             for const_match in const_matches:
+                 const_type = const_match.group(1)
+                 const_name = const_match.group(2)
+                 const_value = const_match.group(3).strip()
+
+                 # Map the type to ROS2 type
+                 ros_type = self._map_type(const_type)
+
+                 constant = IdlConstant(
+                     name=const_name,
+                     const_type=ros_type,
+                     value=const_value
+                 )
+                 constants.append(constant)
+
+         return constants
+
+     def _parse_structures(self, content: str) -> List[IdlStructure]:
+         """Parse structure definitions from module content"""
+         structures = []
+
+         # Find struct definitions
+         struct_pattern = r'struct\s+(\w+)\s*\{([^{}]*(?:\{[^{}]*\}[^{}]*)*)\}'
+         matches = re.finditer(struct_pattern, content, re.DOTALL)
+
+         for match in matches:
+             struct_name = match.group(1)
+             struct_content = match.group(2)
+
+             # Extract comments from @verbatim blocks before the struct
+             comments = self._extract_verbatim_comments(content, match.start())
+
+             # Parse fields from the struct content (need to preprocess it first)
+             clean_struct_content = self._preprocess_content(struct_content)
+             fields = self._parse_fields(clean_struct_content, struct_content, struct_name)  # Pass the clean content, original content, and struct name
+
+             structure = IdlStructure(
+                 name=struct_name,
+                 fields=fields,
+                 constants=[],
+                 comments=comments
+             )
+             structures.append(structure)
+
+         return structures
+
+     def _extract_verbatim_comments(self, content: str, struct_start_pos: int) -> List[str]:
+         """Extract comments from @verbatim blocks immediately before a struct definition"""
+         comments = []
+
+         # Look backwards from struct position to find the most recent @verbatim block
+         content_before_struct = content[:struct_start_pos]
+
+         # Try to find the last @verbatim block before the struct
+         # Look for pattern: @verbatim (language="comment", text="...") struct
+
+         # First, try single-line @verbatim pattern
+         single_line_pattern = r'@verbatim\s*\(\s*language\s*=\s*"comment"\s*,\s*text\s*=\s*"([^"]+)"\s*\)\s*$'
+
+         # Split into lines and work backwards
+         lines = content_before_struct.split('\n')
+
+         for line in reversed(lines):
+             line_stripped = line.strip()
+
+             # Skip empty lines and braces
+             if not line_stripped or line_stripped in ['}', '};']:
+                 continue
+
+             # If we hit non-verbatim content that's not empty/closing, stop looking
+             if not line_stripped.startswith('@verbatim') and line_stripped:
+                 # Unless it's just whitespace or a closing brace, stop
+                 if not (line_stripped == '}' or line_stripped == '};' or not line_stripped):
+                     break
+
+             # Look for @verbatim
+             if '@verbatim' in line and 'language="comment"' in line:
+                 # Try single-line match first
+                 match = re.search(single_line_pattern, line)
+                 if match:
+                     comment_text = match.group(1).strip().replace('\\n', '\n')
+                     return [comment_text]
+
+                 # For multi-line, we need to look at the next line
+                 # This handles cases like:
+                 # @verbatim (language="comment", text=
+                 #   "The comment text")
+                 break
+
+         # If single-line didn't work, try multi-line pattern
+         # Look for @verbatim blocks that span multiple lines
+         multi_line_pattern = r'@verbatim\s*\(\s*language\s*=\s*"comment"\s*,\s*text\s*=\s*"([^"]+)"\s*\)'
+
+         # Search in a reasonable window before the struct (last 500 characters)
+         search_window = content_before_struct[-500:] if len(content_before_struct) > 500 else content_before_struct
+
+         matches = list(re.finditer(multi_line_pattern, search_window, re.DOTALL))
+         if matches:
+             # Take the last match (closest to the struct)
+             last_match = matches[-1]
+             comment_text = last_match.group(1).strip().replace('\\n', '\n')
+             return [comment_text]
+
+         return comments
+
+     def _extract_inline_verbatim_comments(self, content: str) -> Dict[str, str]:
+         """Extract comments from @verbatim blocks that appear before field definitions"""
+         field_comments = {}
+         lines = content.split('\n')
+
+         i = 0
+         while i < len(lines):
+             line = lines[i].strip()
+             if '@verbatim' in line and 'language="comment"' in line:
+                 # Extract the comment text - handle multi-line format
+                 comment_text = ""
+
+                 # Look for text on the same line first
+                 text_match = re.search(r'text\s*=\s*"([^"]*)"', line)
+                 if text_match:
+                     comment_text = text_match.group(1)
+                 else:
+                     # Look for text in subsequent lines - handle concatenated strings
+                     j = i + 1
+                     in_text = True  # Set to True since we found 'text=' in the @verbatim line
+                     text_parts = []
+
+                     while j < len(lines):
+                         next_line = lines[j].strip()
+
+                         if in_text:
+                             # Handle different string concatenation patterns
+                             if next_line.startswith('"') and next_line.endswith('" "'):
+                                 # Pattern: "text" "
+                                 text_content = next_line[1:-3]  # Remove start quote and end quote+space+quote
+                                 if text_content:
+                                     text_parts.append(text_content)
+                             elif next_line.startswith('"') and next_line.endswith('")'):
+                                 # Pattern: "text")
+                                 text_content = next_line[1:-2]  # Remove start quote and end quote+paren
+                                 if text_content:
+                                     text_parts.append(text_content)
+                             elif next_line == '"':
+                                 # Just a newline marker
+                                 text_parts.append('\n')
+                             elif '"' in next_line:
+                                 # Extract all quoted content
+                                 quote_matches = re.findall(r'"([^"]*)"', next_line)
+                                 for quote_match in quote_matches:
+                                     if quote_match:  # Skip empty strings unless they're newlines
+                                         text_parts.append(quote_match)
+
+                         # Check if we've reached the end of the verbatim block
+                         if ')' in next_line and in_text:
+                             break
+                         j += 1
+
+                     comment_text = ''.join(text_parts)
+
+                 # Convert \n to actual newlines and clean up
+                 comment_text = comment_text.replace('\\n', '\n').strip()
+
+                 # Find the next field definition
+                 k = j + 1  # Start after the verbatim block ends
+                 while k < len(lines):
+                     field_line = lines[k].strip()
+                     if field_line and not field_line.startswith('@') and not field_line.startswith('//') and not field_line.startswith('"'):
+                         # Extract field name
+                         field_match = re.search(r'\b(\w+)\s*;', field_line)
+                         if field_match:
+                             field_name = field_match.group(1)
+                             if comment_text:
+                                 field_comments[field_name] = comment_text
+                         break
+                     k += 1
+             i += 1
+
+         return field_comments
+
+     def _parse_fields(self, struct_content: str, original_content: str = None, struct_name: str = None) -> List[IdlField]:
+         """Parse field definitions from struct content"""
+         fields = []
+
+         # Use original content for default value extraction if provided
+         content_for_defaults = original_content if original_content else struct_content
+
+         # First, extract default values from the original content
+         default_values = self._extract_default_values(content_for_defaults)
+
+         # Extract verbatim comments from within the struct
+         inline_comments = self._extract_inline_verbatim_comments(content_for_defaults)
+
+         # Remove @verbatim blocks and @default annotations from the clean content
+         cleaned_content = self._remove_verbatim_blocks(struct_content)
+
+         # Split by semicolon and process each field
+         field_lines = [line.strip() for line in cleaned_content.split(';') if line.strip()]
+
+         for field_line in field_lines:
+             field = self._parse_single_field(field_line, struct_name)
+             if field and field.name in default_values:
+                 field.default_value = default_values[field.name]
+             # Add inline comment if available
+             if field and field.name in inline_comments:
+                 field.comment = inline_comments[field.name]
+             if field:
+                 fields.append(field)
+
+         return fields
+
+     def _extract_default_values(self, content: str) -> Dict[str, str]:
+         """Extract default values from @default annotations"""
+         default_values = {}
+         lines = content.split('\n')
+
+         i = 0
+         while i < len(lines):
+             line = lines[i].strip()
+             if '@default' in line:
+                 # Use regex to extract the value from @default (value=...)
+                 # Handle nested parentheses in the value
+                 default_match = re.search(r'@default\s*\(\s*value\s*=\s*(.+)\)\s*$', line)
+                 if default_match:
+                     default_value = default_match.group(1).strip()
+
+                     # Handle different value formats
+                     if default_value.startswith('"') and default_value.endswith('"'):
+                         # Quoted string - preserve quotes for string fields
+                         inner_value = default_value[1:-1]
+                         if inner_value.startswith('(') and inner_value.endswith(')'):
+                             # It's a quoted tuple like "(False, True, False)"
+                             inner_content = inner_value[1:-1]
+                             # Replace Python boolean constants with ROS2 format
+                             inner_content = inner_content.replace('False', 'false').replace('True', 'true')
+                             # For string arrays, convert single quotes to double quotes
+                             if "'" in inner_content:
+                                 inner_content = inner_content.replace("'", '"')
+                             default_value = '[' + inner_content + ']'
+                         else:
+                             # For string fields, first unescape any escaped quotes
+                             unescaped_value = inner_value.replace('\\"', '"').replace("\\'", "'")
+
+                             # Now apply quoting logic based on content
+                             if '"' in unescaped_value and "'" not in unescaped_value:
+                                 # Has double quotes only - use single quotes to wrap
+                                 default_value = "'" + unescaped_value + "'"
+                             elif "'" in unescaped_value and '"' not in unescaped_value:
+                                 # Has single quotes only - use double quotes to wrap
+                                 default_value = '"' + unescaped_value + '"'
+                             elif "'" in unescaped_value and '"' in unescaped_value:
+                                 # Has both - escape single quotes and use single quotes to wrap
+                                 escaped_value = unescaped_value.replace("'", "\\'")
+                                 default_value = "'" + escaped_value + "'"
+                             else:
+                                 # No internal quotes - use double quotes
+                                 default_value = '"' + unescaped_value + '"'
+                     elif default_value.startswith('(') and default_value.endswith(')'):
+                         # Unquoted tuple format (a, b, c) to array format [a, b, c]
+                         inner_content = default_value[1:-1]
+                         inner_content = inner_content.replace('False', 'false').replace('True', 'true')
+                         default_value = '[' + inner_content + ']'
+                     else:
+                         # Simple value - convert boolean constants and clean up decimals
+                         if default_value == 'FALSE':
+                             default_value = 'false'
+                         elif default_value == 'TRUE':
+                             default_value = 'true'
+                         elif default_value.endswith('.0'):
+                             # Convert 0.0 to 0, 1.0 to 1, etc.
+                             try:
+                                 float_val = float(default_value)
+                                 if float_val.is_integer():
+                                     default_value = str(int(float_val))
+                             except ValueError:
+                                 pass  # Keep original value if conversion fails
+
+                     # Look for the field definition in the next lines
+                     j = i + 1
+                     while j < len(lines):
+                         next_line = lines[j].strip()
+                         if next_line and not next_line.startswith('@') and not next_line.startswith('//'):
+                             # Extract field name from this line
+                             field_match = re.search(r'\b(\w+)\s*;', next_line)
+                             if field_match:
+                                 field_name = field_match.group(1)
+                                 default_values[field_name] = default_value
+                             break
+                         j += 1
+             i += 1
+
+         return default_values
+
+     def _remove_verbatim_blocks(self, content: str) -> str:
+         """Remove @verbatim and @default blocks from content"""
+         lines = content.split('\n')
+         processed_lines = []
+         in_verbatim = False
+         paren_count = 0
+
+         for line in lines:
+             # Skip @verbatim blocks
+             if '@verbatim' in line and not in_verbatim:
+                 in_verbatim = True
+                 paren_count = line.count('(') - line.count(')')
+                 continue
+             elif in_verbatim:
+                 paren_count += line.count('(') - line.count(')')
+                 if paren_count <= 0:
+                     in_verbatim = False
+                 continue
+
+             # Skip @default annotations completely
+             if '@default' in line:
+                 continue
+
+             # Skip @unit annotations completely
+             if '@unit' in line:
+                 continue
+
+             processed_lines.append(line)
+
+         return '\n'.join(processed_lines)
+
+     def _parse_single_field(self, field_line: str, struct_name: str = None) -> Optional[IdlField]:
+         """Parse a single field definition"""
+         field_line = field_line.strip()
+         if not field_line:
+             return None
+
+         # Handle sequence types: sequence<type> name or sequence<type, bound> name
+         sequence_match = re.match(r'sequence<([^,>]+)(?:,\s*(\d+))?>\s+(\w+)', field_line)
+         if sequence_match:
+             inner_type = sequence_match.group(1).strip()
+             bound = sequence_match.group(2)
+             field_name = sequence_match.group(3)
+
+             # Map the inner type with field name context
+             ros_type = self._map_type_with_context(inner_type, field_name, struct_name)
+
+             # Handle bounded sequence
+             if bound:
+                 bound_value = int(bound)
+                 return IdlField(
+                     field_type=ros_type,
+                     name=field_name,
+                     is_sequence=True,
+                     is_array=True,
+                     array_size=bound_value  # Store bound as array_size for bounded sequences
+                 )
+             else:
+                 return IdlField(
+                     field_type=ros_type,
+                     name=field_name,
+                     is_sequence=True,
+                     is_array=True
+                 )
+
+         # Handle array types: type[size] name or type[] name
+         array_match = re.match(r'([^[\s]+)\s*\[([^\]]*)\]\s+(\w+)', field_line)
+         if array_match:
+             base_type = array_match.group(1)
+             array_size_str = array_match.group(2)
+             field_name = array_match.group(3)
+
+             ros_type = self._map_type_with_context(base_type, field_name, struct_name)
+             array_size = int(array_size_str) if array_size_str.isdigit() else None
+
+             return IdlField(
+                 field_type=ros_type,
+                 name=field_name,
+                 is_array=True,
+                 array_size=array_size
+             )
+
+         # Handle bounded strings: string<size> name
+         bounded_string_match = re.match(r'string<(\d+)>\s+(\w+)', field_line)
+         if bounded_string_match:
+             bound_size = int(bounded_string_match.group(1))
+             field_name = bounded_string_match.group(2)
+
+             return IdlField(
+                 field_type='string',
+                 name=field_name,
+                 is_array=True,  # Use is_array to indicate bounded
+                 is_bounded_string=True,
+                 array_size=bound_size
+             )
+
+         # Handle regular types: type name or type<params> name
+         regular_match = re.match(r'([^:\s]+(?:::[^:\s]+)*)\s+(\w+)', field_line)
+         if regular_match:
+             field_type = regular_match.group(1)
+             field_name = regular_match.group(2)
+
+             ros_type = self._map_type_with_context(field_type, field_name, struct_name)
+
+             # Check if this is a typedef array
+             if field_type in self.typedefs:
+                 typedef_info = self.typedefs[field_type]
+                 # Apply context mapping to the typedef base type
+                 contextual_type = self._map_type_with_context(typedef_info['base_type'], field_name, struct_name)
+                 return IdlField(
+                     field_type=contextual_type,
+                     name=field_name,
+                     is_array=True,
+                     array_size=typedef_info['array_size']
+                 )
+
+             return IdlField(
+                 field_type=ros_type,
+                 name=field_name
+             )
+
+         return None
+
+     def _map_type(self, idl_type: str) -> str:
+         """Map IDL type to ROS2 type"""
+         # Check if it's a typedef first
+         if idl_type in self.typedefs:
+             typedef_info = self.typedefs[idl_type]
+             return typedef_info['base_type']
+
+         # Handle namespaced types (e.g., std_msgs::msg::Header)
+         if '::' in idl_type:
+             parts = idl_type.split('::')
+             if len(parts) >= 3:
+                 # For types like package::msg::Type, check if it's in the same package context
+                 package = parts[0]
+                 msg_type = parts[-1]
+
+                 # If it's the same package we're currently processing, just use the type name
+                 if package == self.current_package or package == 'rmw_dds_common' or package == 'test_msgs':
+                     return msg_type
+                 else:
+                     return f"{package}/{msg_type}"
+             else:
+                 return idl_type.replace('::', '/')
+
+         # Handle basic types
+         return self.TYPE_MAPPING.get(idl_type, idl_type)
+
+     def _map_type_with_context(self, idl_type: str, field_name: str, struct_name: str = None) -> str:
+         """Map IDL type to ROS2 type with field name context"""
+         # Special case: uint8 with "char" in field name should map to char
+         if idl_type == 'uint8' and 'char' in field_name.lower():
+             return 'char'
+
+         # Special case: uint8 in Char struct should map to char
+         if idl_type == 'uint8' and struct_name and 'char' in struct_name.lower():
+             return 'char'
+
+         # Otherwise use regular mapping
+         return self._map_type(idl_type)
+
+     def _determine_interface_type(self, structure: IdlStructure, file_path: str) -> IdlElementType:
+         """Determine if structure represents a message, service, or action"""
+         file_name = os.path.basename(file_path).lower()
+         struct_name = structure.name.lower()
+
+         # Check for service patterns - be more specific to avoid false positives
+         if ('.srv' in file_path or
+                 struct_name.endswith('_request') or struct_name.endswith('_response') or
+                 struct_name == 'request' or struct_name == 'response'):
+             return IdlElementType.SERVICE
+         elif ('.action' in file_path or
+               struct_name.endswith('_goal') or struct_name.endswith('_result') or struct_name.endswith('_feedback') or
+               struct_name == 'goal' or struct_name == 'result' or struct_name == 'feedback'):
+             return IdlElementType.ACTION
+         else:
+             return IdlElementType.MESSAGE
+
+
+ class RosInterfaceGenerator:
+     """Generates ROS2 interface files from IDL interfaces"""
+
+     def __init__(self, output_dir: str = "ros_interfaces"):
+         self.output_dir = pathlib.Path(output_dir)
+         self.output_dir.mkdir(exist_ok=True)
+
+         # Create subdirectories
+         self.msg_dir = self.output_dir / "msg"
+         self.srv_dir = self.output_dir / "srv"
+         self.action_dir = self.output_dir / "action"
+
+         self.msg_dir.mkdir(exist_ok=True)
+         self.srv_dir.mkdir(exist_ok=True)
+         self.action_dir.mkdir(exist_ok=True)
+
+         # Store interfaces for service/action combining
+         self.service_parts = {}
+         self.action_parts = {}
+
+     def generate_interfaces(self, interfaces: List[IdlInterface]) -> List[str]:
+         """Generate ROS2 interface files from IDL interfaces"""
+         generated_files = []
+
+         # First pass: collect service and action parts
+         for interface in interfaces:
+             if interface.interface_type == IdlElementType.SERVICE:
+                 self._collect_service_part(interface)
+             elif interface.interface_type == IdlElementType.ACTION:
+                 self._collect_action_part(interface)
+
+         # Second pass: generate files
+         for interface in interfaces:
+             if interface.interface_type == IdlElementType.MESSAGE:
+                 file_path = self._generate_message(interface)
+                 if file_path:
+                     generated_files.append(str(file_path))
+
+         # Generate combined service files
+         generated_files.extend(self._generate_service_files())
+
+         # Generate combined action files
+         generated_files.extend(self._generate_action_files())
+
+         return generated_files
+
+     def _collect_service_part(self, interface: IdlInterface):
+         """Collect service request/response parts"""
+         if interface.name.endswith('_Request'):
+             base_name = interface.name[:-8]  # Remove '_Request'
+             if base_name not in self.service_parts:
+                 self.service_parts[base_name] = {}
+             self.service_parts[base_name]['request'] = interface
+         elif interface.name.endswith('_Response'):
+             base_name = interface.name[:-9]  # Remove '_Response'
+             if base_name not in self.service_parts:
+                 self.service_parts[base_name] = {}
+             self.service_parts[base_name]['response'] = interface
+
+     def _collect_action_part(self, interface: IdlInterface):
+         """Collect action goal/result/feedback parts"""
+         name = interface.name
+         if name.endswith('_Goal'):
+             base_name = name[:-5]  # Remove '_Goal'
+             if base_name not in self.action_parts:
+                 self.action_parts[base_name] = {}
+             self.action_parts[base_name]['goal'] = interface
+         elif name.endswith('Goal'):
+             base_name = name[:-4]  # Remove 'Goal'
+             if base_name not in self.action_parts:
+                 self.action_parts[base_name] = {}
+             self.action_parts[base_name]['goal'] = interface
+         elif name.endswith('_Result'):
+             base_name = name[:-7]  # Remove '_Result'
+             if base_name not in self.action_parts:
+                 self.action_parts[base_name] = {}
+             self.action_parts[base_name]['result'] = interface
+         elif name.endswith('Result'):
+             base_name = name[:-6]  # Remove 'Result'
+             if base_name not in self.action_parts:
+                 self.action_parts[base_name] = {}
+             self.action_parts[base_name]['result'] = interface
+         elif name.endswith('_Feedback'):
+             base_name = name[:-9]  # Remove '_Feedback'
+             if base_name not in self.action_parts:
+                 self.action_parts[base_name] = {}
+             self.action_parts[base_name]['feedback'] = interface
+         elif name.endswith('Feedback'):
+             base_name = name[:-8]  # Remove 'Feedback'
+             if base_name not in self.action_parts:
+                 self.action_parts[base_name] = {}
+             self.action_parts[base_name]['feedback'] = interface
+
+     def _generate_service_files(self) -> List[str]:
+         """Generate .srv files from collected service parts"""
+         generated_files = []
+
+         for service_name, parts in self.service_parts.items():
+             if 'request' in parts and 'response' in parts:
+                 file_path = self._generate_combined_service(service_name, parts['request'], parts['response'])
+                 if file_path:
+                     generated_files.append(str(file_path))
+
+         return generated_files
+
+     def _generate_action_files(self) -> List[str]:
+         """Generate .action files from collected action parts"""
+         generated_files = []
+
+         for action_name, parts in self.action_parts.items():
+             if all(key in parts for key in ['goal', 'result', 'feedback']):
+                 file_path = self._generate_combined_action(action_name, parts)
+                 if file_path:
+                     generated_files.append(str(file_path))
+
+         return generated_files
+
+     def _generate_combined_service(self, service_name: str, request_interface: IdlInterface, response_interface: IdlInterface) -> Optional[pathlib.Path]:
+         """Generate a combined .srv file"""
+         lines = []
+
+         # Add request fields
+         if request_interface.structures:
+             structure = request_interface.structures[0]
+             # Add structure comments as comment for first field (if no field comment exists)
+             for i, field in enumerate(structure.fields):
+                 # Add field comment if present, or structure comment for first field
+                 if field.comment:
+                     if lines:  # Add blank line before field comment if not first
+                         lines.append("")
+                     comment_lines = field.comment.split('\n')
+                     for comment_line in comment_lines:
+                         # Unescape quotes in comments
+                         comment_line = comment_line.replace('\\"', '"')
+                         lines.append(f"# {comment_line}")
+                 elif i == 0 and structure.comments:
+                     # Use structure comment for first field if field has no comment
+                     for comment in structure.comments:
+                         comment_lines = comment.split('\n')
+                         for comment_line in comment_lines:
+                             lines.append(f"# {comment_line}")
+                 lines.append(self._format_field(field))
+
+         # Add separator
+         lines.append("---")
+
+         # Add response fields
+         if response_interface.structures:
+             structure = response_interface.structures[0]
+             # Add structure comments as comment for first field (if no field comment exists)
+             for i, field in enumerate(structure.fields):
+                 # Add field comment if present, or structure comment for first field
+                 if field.comment:
+                     lines.append("")  # Add blank line before field comment
+                     comment_lines = field.comment.split('\n')
+                     for comment_line in comment_lines:
+                         # Unescape quotes in comments
+                         comment_line = comment_line.replace('\\"', '"')
+                         lines.append(f"# {comment_line}")
+                 elif i == 0 and structure.comments:
+                     # Use structure comment for first field if field has no comment
+                     for comment in structure.comments:
+                         comment_lines = comment.split('\n')
+                         for comment_line in comment_lines:
+                             lines.append(f"# {comment_line}")
+                 lines.append(self._format_field(field))
+
+         content = "\n".join(lines)
+         file_path = self.srv_dir / f"{service_name}.srv"
+
+         with open(file_path, 'w') as f:
+             f.write(content)
+
+         print(f"Generated: {file_path}")
+         return file_path
+
+     def _generate_combined_action(self, action_name: str, parts: Dict) -> Optional[pathlib.Path]:
+         """Generate a combined .action file"""
+         lines = []
+
+         # Add header comment
+         lines.append(f"# {action_name}.action")
+         lines.append("# Generated from IDL file")
+         lines.append("")
+
+         # Add goal fields
+         lines.append("# Goal")
+         if parts['goal'].structures:
+             for field in parts['goal'].structures[0].fields:
+                 lines.append(self._format_field(field))
+
+         lines.append("---")
+
+         # Add result fields
+         lines.append("# Result")
+         if parts['result'].structures:
+             for field in parts['result'].structures[0].fields:
+                 lines.append(self._format_field(field))
+
+         lines.append("---")
+
+         # Add feedback fields
+         lines.append("# Feedback")
+         if parts['feedback'].structures:
+             for field in parts['feedback'].structures[0].fields:
+                 lines.append(self._format_field(field))
+
+         content = "\n".join(lines)
+         file_path = self.action_dir / f"{action_name}.action"
+
+         with open(file_path, 'w') as f:
+             f.write(content)
+
+         print(f"Generated: {file_path}")
+         return file_path
+
+     def _generate_message(self, interface: IdlInterface) -> Optional[pathlib.Path]:
+         """Generate .msg file"""
+         if not interface.structures:
+             return None
+
+         structure = interface.structures[0]
+         content = self._generate_message_content(structure, interface)
+
+         file_path = self.msg_dir / f"{interface.name}.msg"
+         with open(file_path, 'w') as f:
+             f.write(content)
+
+         print(f"Generated: {file_path}")
+         return file_path
+
+     def _generate_message_content(self, structure: IdlStructure, interface: IdlInterface) -> str:
+         """Generate the content of a .msg file"""
+         lines = []
+
+         # Add verbatim comments first (if any)
+         if structure.comments:
+             for comment in structure.comments:
+                 lines.append(f"# {comment}")
+
+         # Process fields in their original order to preserve IDL field sequence
+         for field in structure.fields:
+             # Add field comment if present
+             if field.comment:
+                 lines.append("")  # Add blank line before field comment
+                 comment_lines = field.comment.split('\n')
+                 for comment_line in comment_lines:
+                     lines.append(f"# {comment_line}")
+             line = self._format_field(field)
+             lines.append(line)
+
+         # Add constants after fields (for ROS2 .msg format compatibility)
+         for constant in structure.constants:
+             line = self._format_constant_as_field(constant)
+             lines.append(line)
+
+         return "\n".join(lines)
+
+     def _format_field(self, field: IdlField) -> str:
+         """Format a field for ROS interface file"""
+         field_type = field.field_type
+
+         # Handle arrays and bounded types
+         if field.is_array:
+             if field.is_sequence and field.array_size is not None:
+                 # Bounded sequence: Type[<=N]
+                 field_type += f"[<={field.array_size}]"
+             elif field.is_sequence:
+                 # Unbounded sequence: Type[]
+                 field_type += "[]"
+             elif field.is_bounded_string:
+                 # Bounded string: string<=N
+                 field_type = f"string<={field.array_size}"
+             elif field.array_size is not None:
+                 # Fixed-size array: Type[N]
+                 field_type += f"[{field.array_size}]"
+             else:
+                 # Dynamic array: Type[]
+                 field_type += "[]"
+
+         line = f"{field_type} {field.name}"
+
+         # Add default value if present
+         if field.default_value:
+             line += f" {field.default_value}"
+
+         return line
+
+     def _format_constant_as_field(self, constant: IdlConstant) -> str:
+         """Format a constant as a field-like entry for compatibility with ROS2 .msg format"""
+         return f"{constant.const_type} {constant.name}={constant.value}"
+
+     def _format_constant(self, constant: IdlConstant) -> str:
+         """Format a constant for ROS interface file"""
+         return f"{constant.const_type} {constant.name}={constant.value}"
+
+
+ def main():
+     """Main function"""
+     parser = argparse.ArgumentParser(description="Convert ROS2 IDL files to interface files")
+     parser.add_argument("idl_file", help="Path to the IDL file to convert")
+     parser.add_argument("-o", "--output", default="ros_interfaces",
+                         help="Output directory for generated files")
+     parser.add_argument("-r", "--root",
+                         help="Root path where the generated files will be located (default: current directory)")
+     parser.add_argument("-p", "--package",
+                         help="Package name to use for generated files (overrides package from IDL)")
+     parser.add_argument("-v", "--verbose", action="store_true",
+                         help="Enable verbose output")
+
+     args = parser.parse_args()
+
+     if not os.path.exists(args.idl_file):
+         print(f"Error: IDL file '{args.idl_file}' not found")
+         return 1
+
+     try:
+         # Parse IDL file
+         idl_parser = IdlParser()
+         interfaces = idl_parser.parse_file(args.idl_file)
+
+         # Override package name if provided
+         if args.package:
+             for interface in interfaces:
+                 interface.package = args.package
+
+         if args.verbose:
+             print(f"Parsed {len(interfaces)} interfaces from {args.idl_file}")
+             for interface in interfaces:
+                 print(f"  - {interface.name} ({interface.interface_type.value})")
+                 if args.package:
+                     print(f"    Package: {interface.package} (overridden)")
+                 else:
+                     print(f"    Package: {interface.package}")
+
+         # Determine output directory
+         if args.root:
+             output_dir = pathlib.Path(args.root) / args.output
+         else:
+             output_dir = pathlib.Path(args.output)
+
+         # Generate ROS interface files
+         generator = RosInterfaceGenerator(str(output_dir))
+         generated_files = generator.generate_interfaces(interfaces)
+
+         print(f"\nGenerated {len(generated_files)} files:")
+         for file_path in generated_files:
+             print(f"  - {file_path}")
+
+         # Display generated file contents if verbose
+         if args.verbose:
+             print("\n" + "="*60)
+             print("Generated file contents:")
+             for file_path in generated_files:
+                 print(f"\n--- {pathlib.Path(file_path).name} ---")
+                 with open(file_path, 'r') as f:
+                     print(f.read())
+
+         return 0
+
+     except Exception as e:
+         print(f"Error: {e}")
+         if args.verbose:
+             import traceback
+             traceback.print_exc()
+         return 1
+
+
+ if __name__ == "__main__":
+     if len(sys.argv) == 1:
+         print("\nUsage: python idl_parser.py <idl_file> [options]")
+         print("Options:")
+         print("  -o, --output DIR     Output directory name")
+         print("  -r, --root PATH      Root path for generated files")
+         print("  -p, --package NAME   Package name to use")
+         print("  -v, --verbose        Enable verbose output")
+     else:
+         exit(main())
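
Usage sketch (not part of the published diff): the converter can be driven through the CLI defined in main() above, or programmatically via IdlParser and RosInterfaceGenerator. The IDL path, output directory, and package name below are placeholder values, and the import assumes package/rosidl_convertor is on the Python path.

    # CLI form, using the options registered in main():
    #   python3 idl_convertor.py path/to/MyType.idl -o ros_interfaces -p my_package -v
    from idl_convertor import IdlParser, RosInterfaceGenerator

    idl_parser = IdlParser()
    interfaces = idl_parser.parse_file("path/to/MyType.idl")   # parse one .idl into IdlInterface objects
    generator = RosInterfaceGenerator("ros_interfaces")        # creates msg/, srv/, action/ subdirectories
    generated = generator.generate_interfaces(interfaces)      # writes .msg/.srv/.action files
    print(generated)                                           # paths of the generated interface files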