kaggle 0.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
data/lib/kaggle/client.rb ADDED
@@ -0,0 +1,255 @@
+ module Kaggle
+   class Client
+     include HTTParty
+
+     base_uri Constants::BASE_URL
+
+     attr_reader :username, :api_key, :download_path, :cache_path, :timeout
+
+     def initialize(username: nil, api_key: nil, credentials_file: nil, download_path: nil, cache_path: nil, timeout: nil)
+       load_credentials(username, api_key, credentials_file)
+       @download_path = download_path || Constants::DEFAULT_DOWNLOAD_PATH
+       @cache_path = cache_path || Constants::DEFAULT_CACHE_PATH
+       @timeout = timeout || Constants::DEFAULT_TIMEOUT
+
+       raise AuthenticationError, 'Username and API key are required' unless valid_credential?(@username) && valid_credential?(@api_key)
+
+       ensure_directories_exist
+       setup_httparty_options
+     end
+
+     def download_dataset(dataset_owner, dataset_name, options = {})
+       dataset_path = "#{dataset_owner}/#{dataset_name}"
+
+       # Check cache first for parsed data
+       if options[:use_cache] && options[:parse_csv]
+         cache_key = generate_cache_key(dataset_path)
+         if cached_file_exists?(cache_key)
+           return load_from_cache(cache_key)
+         end
+       end
+
+       # Check if we already have extracted files for this dataset
+       extracted_dir = get_extracted_dir(dataset_path)
+       if options[:use_cache] && Dir.exist?(extracted_dir) && !Dir.empty?(extracted_dir)
+         return handle_existing_dataset(extracted_dir, options)
+       end
+
+       # Download the zip file
+       response = authenticated_request(:get, "#{Constants::DATASET_ENDPOINTS[:download]}/#{dataset_path}")
+
+       unless response.success?
+         raise DownloadError, "Failed to download dataset: #{response.message}"
+       end
+
+       # Save zip file
+       zip_file = save_zip_file(dataset_path, response.body)
+
+       # Extract zip file
+       extract_zip_file(zip_file, extracted_dir)
+
+       # Clean up zip file
+       File.delete(zip_file) if File.exist?(zip_file)
+
+       # Handle the extracted files
+       result = handle_extracted_dataset(extracted_dir, options)
+
+       # Cache parsed CSV data if requested
+       if options[:use_cache] && options[:parse_csv] && (result.is_a?(Hash) || result.is_a?(Array))
+         cache_key = generate_cache_key(dataset_path)
+         cache_parsed_data(cache_key, result)
+       end
+
+       result
+     end
+
+
+     def dataset_files(dataset_owner, dataset_name)
+       dataset_path = "#{dataset_owner}/#{dataset_name}"
+       response = authenticated_request(:get, "#{Constants::DATASET_ENDPOINTS[:files]}/#{dataset_path}")
+
+       unless response.success?
+         raise DatasetNotFoundError, "Dataset not found or accessible: #{dataset_path}"
+       end
+
+       Oj.load(response.body)
+     rescue Oj::ParseError => e
+       raise ParseError, "Failed to parse dataset files response: #{e.message}"
+     end
+
+     def parse_csv_to_json(file_path)
+       raise Error, "File does not exist: #{file_path}" unless File.exist?(file_path)
+       raise Error, "File is not a CSV: #{file_path}" unless csv_file?(file_path)
+
+       data = []
+       CSV.foreach(file_path, headers: true) do |row|
+         data << row.to_hash
+       end
+
+       data
+     rescue CSV::MalformedCSVError => e
+       raise ParseError, "Failed to parse CSV file: #{e.message}"
+     end
+
+     private
+
+     def valid_credential?(credential)
+       credential && !credential.to_s.strip.empty?
+     end
+
+     def load_credentials(username, api_key, credentials_file)
+       # Try provided credentials file first
+       if credentials_file && File.exist?(credentials_file)
+         credentials = load_credentials_from_file(credentials_file)
+         @username = username || credentials['username']
+         @api_key = api_key || credentials['key']
+       # Try default kaggle.json file if no explicit credentials
+       elsif !username && !api_key && File.exist?(Constants::DEFAULT_CREDENTIALS_FILE)
+         credentials = load_credentials_from_file(Constants::DEFAULT_CREDENTIALS_FILE)
+         @username = credentials['username']
+         @api_key = credentials['key']
+       else
+         # Fall back to environment variables
+         @username = username || ENV['KAGGLE_USERNAME']
+         @api_key = api_key || ENV['KAGGLE_KEY']
+       end
+     end
+
+     def load_credentials_from_file(file_path)
+       content = File.read(file_path)
+       Oj.load(content)
+     rescue Oj::ParseError => e
+       raise AuthenticationError, "Invalid credentials file format: #{e.message}"
+     rescue => e
+       raise AuthenticationError, "Failed to read credentials file: #{e.message}"
+     end
+
+     def ensure_directories_exist
+       FileUtils.mkdir_p(@download_path) unless Dir.exist?(@download_path)
+       FileUtils.mkdir_p(@cache_path) unless Dir.exist?(@cache_path)
+     end
+
+     def setup_httparty_options
+       self.class.default_options.merge!({
+         headers: Constants::REQUIRED_HEADERS,
+         timeout: @timeout,
+         basic_auth: {
+           username: @username,
+           password: @api_key
+         }
+       })
+     end
+
+     def authenticated_request(method, endpoint, options = {})
+       self.class.send(method, endpoint, options)
+     rescue Timeout::Error, Net::ReadTimeout, Net::OpenTimeout
+       raise Error, 'Request timed out'
+     rescue => e
+       raise Error, "Request failed: #{e.message}"
+     end
+
+     def get_extracted_dir(dataset_path)
+       dir_name = dataset_path.gsub('/', '_')
+       File.join(@download_path, dir_name)
+     end
+
+     def save_zip_file(dataset_path, content)
+       filename = "#{dataset_path.gsub('/', '_')}.zip"
+       file_path = File.join(@download_path, filename)
+
+       File.open(file_path, 'wb') do |file|
+         file.write(content)
+       end
+
+       file_path
+     end
+
+     def extract_zip_file(zip_file_path, extract_to_dir)
+       FileUtils.mkdir_p(extract_to_dir)
+
+       Zip::File.open(zip_file_path) do |zip_file|
+         zip_file.each do |entry|
+           extract_path = File.join(extract_to_dir, entry.name)
+
+           # Guard against zip-slip: entry names like "../x" must not
+           # escape the extraction directory
+           unless File.expand_path(extract_path).start_with?(File.expand_path(extract_to_dir) + File::SEPARATOR)
+             raise DownloadError, "Unsafe path in zip entry: #{entry.name}"
+           end
+
+           if entry.directory?
+             # Create directory
+             FileUtils.mkdir_p(extract_path)
+           else
+             # Create parent directory if it doesn't exist
+             parent_dir = File.dirname(extract_path)
+             FileUtils.mkdir_p(parent_dir) unless Dir.exist?(parent_dir)
+
+             # Extract file manually to avoid path issues
+             File.open(extract_path, 'wb') do |f|
+               f.write entry.get_input_stream.read
+             end
+           end
+         end
+       end
+     rescue Zip::Error => e
+       raise DownloadError, "Failed to extract zip file: #{e.message}"
+     end
+
+     def handle_existing_dataset(extracted_dir, options)
+       if options[:parse_csv]
+         csv_files = find_csv_files(extracted_dir)
+         return parse_csv_files_to_json(csv_files) unless csv_files.empty?
+       end
+
+       extracted_dir
+     end
+
+     def handle_extracted_dataset(extracted_dir, options)
+       if options[:parse_csv]
+         csv_files = find_csv_files(extracted_dir)
+         unless csv_files.empty?
+           parsed_data = parse_csv_files_to_json(csv_files)
+           return parsed_data
+         end
+       end
+
+       extracted_dir
+     end
+
+     def find_csv_files(directory)
+       Dir.glob(File.join(directory, '**', '*.csv'))
+     end
+
+     def parse_csv_files_to_json(csv_files)
+       result = {}
+
+       csv_files.each do |csv_file|
+         file_name = File.basename(csv_file, '.csv')
+         result[file_name] = parse_csv_to_json(csv_file)
+       end
+
+       # If there's only one CSV file, return its data directly
+       result.length == 1 ? result.values.first : result
+     end
+
+     def generate_cache_key(dataset_path)
+       "#{dataset_path.gsub('/', '_')}_parsed.json"
+     end
+
+     def cached_file_exists?(cache_key)
+       File.exist?(File.join(@cache_path, cache_key))
+     end
+
+     def load_from_cache(cache_key)
+       cache_file_path = File.join(@cache_path, cache_key)
+       Oj.load(File.read(cache_file_path))
+     rescue Oj::ParseError => e
+       raise ParseError, "Failed to parse cached data: #{e.message}"
+     end
+
+     def cache_parsed_data(cache_key, data)
+       cache_file_path = File.join(@cache_path, cache_key)
+       File.write(cache_file_path, Oj.dump(data, mode: :compat, indent: 2))
+     end
+
+     def csv_file?(file_path)
+       File.extname(file_path).downcase == '.csv'
+     end
+
+   end
+ end
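
A minimal usage sketch of the client above (the dataset owner/name and paths are illustrative; credentials fall back to `./kaggle.json` or the environment as shown in `load_credentials`):

```ruby
require 'kaggle'

client = Kaggle::Client.new(
  username: ENV['KAGGLE_USERNAME'],
  api_key: ENV['KAGGLE_KEY'],
  download_path: './downloads',
  cache_path: './cache'
)

# With parse_csv: true a single-CSV dataset comes back as an Array of row
# Hashes; with use_cache: true the parsed result is cached as JSON, so a
# repeat call skips both the download and the CSV parse.
rows = client.download_dataset('some-owner', 'some-dataset',
                               parse_csv: true, use_cache: true)
puts rows.first
```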
data/lib/kaggle/constants.rb ADDED
@@ -0,0 +1,23 @@
+ module Kaggle
+   module Constants
+     BASE_URL = 'https://www.kaggle.com/api/v1'
+
+     DEFAULT_DOWNLOAD_PATH = './downloads'
+     DEFAULT_CACHE_PATH = './cache'
+     DEFAULT_CREDENTIALS_FILE = './kaggle.json'
+     DEFAULT_TIMEOUT = 30
+
+     SUPPORTED_FORMATS = %w[csv json].freeze
+
+     DATASET_ENDPOINTS = {
+       view: '/datasets/view',
+       download: '/datasets/download',
+       files: '/datasets/data'
+     }.freeze
+
+     REQUIRED_HEADERS = {
+       'User-Agent' => 'Kaggle Ruby Client/0.0.1',
+       'Accept' => 'application/json'
+     }.freeze
+   end
+ end
data/lib/kaggle/version.rb ADDED
@@ -0,0 +1,3 @@
+ module Kaggle
+   VERSION = '0.0.1'
+ end
data/lib/kaggle.rb ADDED
@@ -0,0 +1,19 @@
+ require 'httparty'
+ require 'csv'
+ require 'oj'
+ require 'fileutils'
+ require 'net/http'
+ require 'timeout'
+ require 'zip'
+
+ require_relative 'kaggle/version'
+ require_relative 'kaggle/constants'
+ require_relative 'kaggle/client'
+
+ module Kaggle
+   class Error < StandardError; end
+   class AuthenticationError < Error; end
+   class DatasetNotFoundError < Error; end
+   class DownloadError < Error; end
+   class ParseError < Error; end
+ end
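
Every error the gem raises inherits from `Kaggle::Error`, so callers can rescue narrowly or broadly; a small sketch (dataset names illustrative):

```ruby
begin
  client = Kaggle::Client.new
  client.download_dataset('some-owner', 'some-dataset', parse_csv: true)
rescue Kaggle::AuthenticationError => e
  abort "Check KAGGLE_USERNAME/KAGGLE_KEY or kaggle.json: #{e.message}"
rescue Kaggle::DatasetNotFoundError, Kaggle::DownloadError => e
  warn "Could not fetch dataset: #{e.message}"
rescue Kaggle::Error => e
  # ParseError and any other gem-level failure lands here
  warn "Kaggle client error: #{e.message}"
end
```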
@@ -0,0 +1,179 @@
+ # Benchmarks and Performance Plan
+
+ ## Overview
+ Implement benchmarking capabilities to measure and compare model performance, dataset processing speeds, and API response times.
+
+ ## Current State
+ - No benchmarking functionality exists
+ - Basic error handling and performance considerations in place
+ - Opportunity to build a comprehensive benchmarking suite
+
+ ## Planned Features
+
+ ### Phase 1: Dataset Benchmarks
+ - [ ] **Download Speed Metrics**: Measure dataset download speeds
+ - [ ] **Parsing Performance**: Benchmark CSV to JSON conversion speeds
+ - [ ] **Cache Performance**: Measure cache hit/miss ratios and speeds
+ - [ ] **Size vs Speed Analysis**: Correlate dataset size with processing time
+ - [ ] **Format Comparison**: Compare performance across different file formats
+
+ ### Phase 2: Model Benchmarks
+ - [ ] **Model Download Times**: Track model download performance
+ - [ ] **Loading Benchmarks**: Measure model loading and initialization times
+ - [ ] **Inference Speed**: Benchmark model prediction performance
+ - [ ] **Memory Usage**: Monitor memory consumption during operations
+ - [ ] **Framework Comparison**: Compare performance across ML frameworks
+
+ ### Phase 3: API Performance
+ - [ ] **Response Time Tracking**: Monitor API endpoint response times
+ - [ ] **Rate Limit Analysis**: Track API rate limiting and optimal usage patterns
+ - [ ] **Concurrent Request Performance**: Benchmark parallel API calls
+ - [ ] **Error Rate Monitoring**: Track API error rates over time
+ - [ ] **Geolocation Performance**: Compare performance from different regions
+
+ ### Phase 4: System Benchmarks
+ - [ ] **Network Performance**: Measure the impact of network conditions
+ - [ ] **Disk I/O Performance**: Benchmark local file operations
+ - [ ] **CPU/Memory Usage**: Profile resource consumption
+ - [ ] **Platform Comparison**: Compare performance across operating systems
+ - [ ] **Ruby Version Impact**: Benchmark across different Ruby versions
+
+ ## Technical Implementation
+
+ ### Benchmarking Framework
+ ```ruby
+ # lib/kaggle/benchmark.rb
+ require 'benchmark' # stdlib; reachable as ::Benchmark since this class shadows the name
+
+ module Kaggle
+   class Benchmark
+     attr_reader :results, :config
+
+     def initialize(config = {})
+       @config = default_config.merge(config)
+       @results = []
+     end
+
+     def run_dataset_benchmark(dataset_path, iterations: 5)
+       # Benchmark dataset operations
+     end
+
+     def run_api_benchmark(endpoint, iterations: 10)
+       # Benchmark API endpoint performance
+     end
+
+     def generate_report
+       # Generate performance report
+     end
+
+     private
+
+     def default_config
+       {} # placeholder defaults
+     end
+   end
+ end
+
+ # lib/kaggle/performance_monitor.rb
+ class Kaggle::PerformanceMonitor
+   def self.monitor(operation_name, &block)
+     # Monitor and log performance metrics
+   end
+
+   def self.track_memory_usage(&block)
+     # Track memory usage during operations
+   end
+
+   def self.profile_cpu_usage(&block)
+     # Profile CPU usage patterns
+   end
+ end
+ ```
+
+ ### Metrics Collection
+ ```ruby
+ # Performance metrics structure
+ {
+   operation: 'dataset_download',
+   timestamp: Time.now,
+   duration_ms: 1234,
+   memory_usage_mb: 45.6,
+   cpu_usage_percent: 23.4,
+   network_bytes: 1024000,
+   cache_hit: true,
+   error: nil,
+   metadata: {
+     dataset_size_mb: 100,
+     file_count: 5,
+     format: 'csv'
+   }
+ }
+ ```
+
+ ### CLI Integration
+ ```bash
+ # Run benchmarks
+ kaggle benchmark datasets --iterations 10
+ kaggle benchmark api --endpoint datasets/list
+ kaggle benchmark models --model-id example/model
+
+ # View benchmark results
+ kaggle benchmark report
+ kaggle benchmark compare --baseline v0.1.0
+ kaggle benchmark export --format json
+
+ # Performance profiling
+ kaggle profile download dataset-owner/dataset-name
+ kaggle profile parse large-dataset.csv
+ ```
+
+ ### Reporting and Visualization
+ - [ ] **HTML Reports**: Generate detailed HTML performance reports
+ - [ ] **CSV Export**: Export raw metrics for external analysis
+ - [ ] **Comparison Reports**: Compare performance across versions/configurations
+ - [ ] **Trend Analysis**: Track performance changes over time
+ - [ ] **Regression Detection**: Alert on performance degradation
+
+ ### Integration with Testing
+ ```ruby
+ # test/performance/benchmark_test.rb
+ class BenchmarkTest < Minitest::Test
+   def test_dataset_download_performance
+     benchmark = Kaggle::Benchmark.new
+     result = benchmark.run_dataset_benchmark('test/dataset')
+
+     # Assert performance meets requirements
+     assert result.average_duration < 5000, "Download too slow"
+     assert result.memory_usage < 100, "Memory usage too high"
+   end
+ end
+ ```
+
+ ## Performance Targets
+
+ ### Dataset Operations
+ - CSV parsing: ≥ 1 MB/second throughput for typical datasets
+ - Download speed: Limited by network, not processing
+ - Cache retrieval: < 100ms for typical datasets
+ - Memory usage: < 2x dataset size during processing
+
+ ### API Operations
+ - List requests: < 2 seconds response time
+ - Download initiation: < 5 seconds
+ - Metadata retrieval: < 1 second
+ - Error recovery: < 30 seconds for retries
+
+ ### Model Operations
+ - Model listing: < 3 seconds response time
+ - Model download: Progress tracking every 5% completion
+ - Model loading: Framework-dependent, track baseline
+ - Inference: Model-specific, establish benchmarks
+
+ ## Continuous Integration
+ - [ ] **Automated Benchmarks**: Run benchmarks in CI pipeline
+ - [ ] **Performance Regression Tests**: Fail CI on significant slowdowns
+ - [ ] **Baseline Tracking**: Maintain performance baselines across versions
+ - [ ] **Alert System**: Notify maintainers of performance issues
+
+ ## Priority: Low
+ Target completion: Version 0.5.0
+
+ ## Notes
+ - Benchmarks should be optional and not affect normal gem usage
+ - Consider integration with Ruby profiling tools (ruby-prof, memory_profiler); see the monitoring sketch after this plan
+ - Benchmarks may reveal optimization opportunities in current code
+ - Results should be comparable across different environments
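
One way the planned `PerformanceMonitor.monitor` could be fleshed out using only Ruby's stdlib `Benchmark`; the returned keys mirror the metrics structure proposed above, but this is a sketch under those assumptions, not settled design:

```ruby
require 'benchmark'

module Kaggle
  class PerformanceMonitor
    # Runs the block, measures wall-clock time, and returns a metrics Hash.
    # Errors are captured rather than re-raised so a failed run still yields a metric.
    def self.monitor(operation_name)
      error = nil
      result = nil
      seconds = ::Benchmark.realtime do
        begin
          result = yield
        rescue StandardError => e
          error = e.class.name
        end
      end
      { operation: operation_name, duration_ms: (seconds * 1000).round(1),
        error: error, result: result }
    end
  end
end

# Kaggle::PerformanceMonitor.monitor('dataset_download') do
#   client.download_dataset('some-owner', 'some-dataset')
# end
```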
data/plans/cli_tool.md ADDED
@@ -0,0 +1,35 @@
+ # CLI Tool Enhancement Plan
+
+ ## Current State
+ The gem includes a basic CLI tool (`bin/kaggle`) with essential functionality for listing, downloading, and viewing dataset files.
+
+ ## Planned Enhancements
+
+ ### Phase 1: Core CLI Improvements
+ - [ ] **Interactive Mode**: Add interactive prompts for common operations
+ - [ ] **Progress Indicators**: Show download progress for large datasets
+ - [ ] **Better Output Formatting**: Improve table formatting for dataset lists
+ - [ ] **Configuration File Support**: Allow CLI configuration via YAML/JSON config files
+ - [ ] **Verbose/Quiet Modes**: Add -v and -q flags for different output levels
+
+ ### Phase 2: Advanced Features
+ - [ ] **Bulk Operations**: Support downloading multiple datasets with patterns
+ - [ ] **Search Filters**: Advanced filtering options (date, size, format, etc.)
+ - [ ] **Export Formats**: Support exporting dataset lists to CSV/JSON
+ - [ ] **Parallel Downloads**: Download multiple datasets concurrently
+ - [ ] **Resume Downloads**: Resume interrupted downloads
+
+ ### Phase 3: Competition Support
+ - [ ] **Competition Listing**: List available competitions
+ - [ ] **Competition Data**: Download competition datasets
+ - [ ] **Submission Management**: Submit competition entries via CLI
+ - [ ] **Leaderboard View**: View competition leaderboards
+
+ ### Implementation Notes
+ - Use Thor or the TTY toolkit for enhanced CLI functionality (see the sketch after this plan)
+ - Add a comprehensive help system with examples
+ - Include bash/zsh completion scripts
+ - Implement proper signal handling for graceful interruption
+
+ ## Priority: Medium
+ Target completion: Version 0.2.0
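
A hedged sketch of what the Thor-based CLI mentioned in the implementation notes might look like; the command and flag names are illustrative, not the shipped `bin/kaggle` interface:

```ruby
require 'thor'
require 'kaggle'

class KaggleCLI < Thor
  desc 'download OWNER/DATASET', 'Download a dataset, optionally parsing CSVs to JSON'
  option :parse_csv, type: :boolean, default: false
  option :verbose, type: :boolean, aliases: '-v'
  def download(dataset_path)
    owner, name = dataset_path.split('/', 2)
    result = Kaggle::Client.new.download_dataset(owner, name,
                                                 parse_csv: options[:parse_csv],
                                                 use_cache: true)
    puts options[:verbose] ? result.inspect : 'Done.'
  rescue Kaggle::Error => e
    abort "Error: #{e.message}"
  end
end

KaggleCLI.start(ARGV)
```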
@@ -0,0 +1,11 @@
+ Please help implement a new Ruby gem here. Use `/Users/trex22/development/url_categorise` as an example. The Version should be 0.0.1. Here is the API documentation: @https://www.kaggle.com/docs/api . Also here is another context resource: @https://github.com/Kaggle/kaggle-api. Create a new plan in plans/. For now the gem should only handle downloading and parsing datasets. For now only open CSV datasets into a json structure. Allow for optional parameters to specify download paths and caching paths as this gem will be used elsewhere where we want a cache download location so that the dataset does not always have to be downloaded. Create and update a README.md with relevant info and use `/Users/trex22/development/url_categorise` as an example. Also add in tests using `/Users/trex22/development/url_categorise` as an example with relevant bin/ scripts. Add in a CLAUDE.md as well. Lastly, update the plans with new plans for future development which include, CLI tool, lists, models, benchmarks etc ...
+
+ List and plan out all actions before actioning.
+
+ Use the oj gem version 3.16.11 instead of the ruby json library. Also please increase test
+ coverage and fix the failing tests. Also copy codes of conduct from
+ /Users/trex22/development/url_categorise/.
+
+
+ Oj has a slightly different interface: https://github.com/ohler55/oj
+
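
The Oj note above points at its interface differing from the stdlib JSON module; the client code uses the pattern below, where `mode: :compat` keeps Oj's behaviour aligned with `JSON.generate`/`JSON.parse`:

```ruby
require 'oj'

data = { 'name' => 'example', 'rows' => 3 }

json = Oj.dump(data, mode: :compat, indent: 2) # in place of JSON.pretty_generate
back = Oj.load(json)                           # in place of JSON.parse
raise 'round-trip failed' unless back == data
```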
data/plans/lists.md ADDED
@@ -0,0 +1,77 @@
+ # Lists Enhancement Plan
+
+ ## Overview
+ Expand the current listing functionality to provide comprehensive discovery and filtering capabilities for Kaggle resources.
+
+ ## Current State
+ - Basic dataset listing with search and pagination
+ - Simple dataset file listing
+
+ ## Planned Enhancements
+
+ ### Phase 1: Enhanced Dataset Lists
+ - [ ] **Advanced Filtering**: Filter by license, file formats, size, update date
+ - [ ] **Sorting Options**: Sort by popularity, date, size, downloads
+ - [ ] **Category Browsing**: Browse datasets by category/topic
+ - [ ] **User/Organization Datasets**: List datasets by specific users or organizations
+ - [ ] **Featured Datasets**: Highlight trending or featured datasets
+
+ ### Phase 2: Competition Lists
+ - [ ] **Competition Discovery**: List active, completed, and upcoming competitions
+ - [ ] **Competition Filtering**: Filter by category, prize pool, participant count
+ - [ ] **Competition Search**: Search competitions by title, description, tags
+ - [ ] **Personal Competitions**: List user's participated competitions
+ - [ ] **Competition Metrics**: Show participation stats, deadlines, prizes
+
+ ### Phase 3: Model Lists
+ - [ ] **Model Discovery**: List available models and frameworks
+ - [ ] **Model Filtering**: Filter by framework, task type, performance metrics
+ - [ ] **Model Versions**: Track different versions of models
+ - [ ] **Popular Models**: Highlight trending and highly-rated models
+ - [ ] **User Models**: List models by specific users
+
+ ### Phase 4: Kernel/Notebook Lists
+ - [ ] **Code Discovery**: List public kernels and notebooks
+ - [ ] **Language Filtering**: Filter by programming language (R, Python, etc.)
+ - [ ] **Topic Browsing**: Browse by dataset or competition
+ - [ ] **Popular Code**: Highlight most-voted and most-forked notebooks
+ - [ ] **Recent Activity**: Show recently updated kernels
+
+ ## Technical Implementation
+
+ ### API Endpoints
+ - Implement consistent pagination across all list types (see the sketch after this plan)
+ - Add a caching layer for frequently accessed lists
+ - Support bulk operations for multiple list requests
+
+ ### CLI Enhancements
+ - Interactive filtering and sorting in the CLI
+ - Export capabilities (CSV, JSON, XML)
+ - Bookmarking and favorites functionality
+ - Watchlist for monitoring specific items
+
+ ### Data Structures
+ ```ruby
+ # Enhanced listing response format
+ {
+   items: [],        # List of resources
+   pagination: {     # Pagination metadata
+     page: 1,
+     per_page: 20,
+     total_pages: 50,
+     total_count: 1000
+   },
+   filters: {        # Applied filters
+     category: 'finance',
+     license: 'cc-by',
+     updated_since: '2023-01-01'
+   },
+   sort: {           # Current sorting
+     field: 'popularity',
+     direction: 'desc'
+   }
+ }
+ ```
+
+ ## Priority: High
+ Target completion: Version 0.3.0
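
For the pagination item above, a sketch of how a paginated dataset list could be added to the existing client. The `/datasets/list` endpoint with `search` and `page` parameters follows the public Kaggle API docs; the method itself does not exist in this version:

```ruby
module Kaggle
  class Client
    # Returns one page of dataset metadata as parsed JSON.
    def list_datasets(search: nil, page: 1)
      query = { page: page }
      query[:search] = search if search
      response = authenticated_request(:get, '/datasets/list', query: query)
      raise Error, "List request failed: #{response.code}" unless response.success?

      Oj.load(response.body)
    end
  end
end
```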