everyrow 0.1.10.tar.gz → 0.2.0.tar.gz
This diff shows the content of publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only.
- {everyrow-0.1.10 → everyrow-0.2.0}/.claude-plugin/marketplace.json +1 -1
- {everyrow-0.1.10 → everyrow-0.2.0}/.claude-plugin/plugin.json +1 -1
- everyrow-0.2.0/.env.example +6 -0
- everyrow-0.2.0/.github/workflows/deploy-docs.yaml +92 -0
- everyrow-0.2.0/.github/workflows/docs-sync-check.yml +81 -0
- everyrow-0.2.0/.github/workflows/integration-tests.yml +32 -0
- {everyrow-0.1.10 → everyrow-0.2.0}/.gitignore +3 -0
- everyrow-0.2.0/CITATION.cff +22 -0
- {everyrow-0.1.10 → everyrow-0.2.0}/PKG-INFO +22 -8
- {everyrow-0.1.10 → everyrow-0.2.0}/README.md +21 -7
- {everyrow-0.1.10 → everyrow-0.2.0}/docs/add-column-web-lookup.md +5 -0
- {everyrow-0.1.10 → everyrow-0.2.0}/docs/case_studies/dedupe-crm-company-records/notebook.ipynb +6 -7
- {everyrow-0.1.10 → everyrow-0.2.0}/docs/case_studies/match-software-vendors-to-requirements/notebook.ipynb +4 -11
- {everyrow-0.1.10 → everyrow-0.2.0}/docs/case_studies/merge-contacts-with-company-data/notebook.ipynb +4 -9
- {everyrow-0.1.10 → everyrow-0.2.0}/docs/case_studies/merge-overlapping-contact-lists/notebook.ipynb +4 -9
- {everyrow-0.1.10 → everyrow-0.2.0}/docs/case_studies/multi-stage-lead-qualification/notebook.ipynb +4 -13
- {everyrow-0.1.10 → everyrow-0.2.0}/docs/case_studies/research-and-rank-permit-times/notebook.ipynb +4 -9
- {everyrow-0.1.10 → everyrow-0.2.0}/docs/case_studies/score-leads-from-fragmented-data/notebook.ipynb +4 -9
- {everyrow-0.1.10 → everyrow-0.2.0}/docs/case_studies/score-leads-without-crm-history/notebook.ipynb +4 -9
- {everyrow-0.1.10 → everyrow-0.2.0}/docs/case_studies/screen-job-postings-by-criteria/notebook.ipynb +4 -12
- {everyrow-0.1.10 → everyrow-0.2.0}/docs/case_studies/screen-stocks-by-investment-thesis/notebook.ipynb +5 -10
- {everyrow-0.1.10 → everyrow-0.2.0}/docs/case_studies/screen-stocks-by-margin-sensitivity/notebook.ipynb +5 -14
- {everyrow-0.1.10 → everyrow-0.2.0}/docs/classify-dataframe-rows-llm.md +5 -0
- {everyrow-0.1.10 → everyrow-0.2.0}/docs/deduplicate-training-data-ml.md +5 -0
- {everyrow-0.1.10 → everyrow-0.2.0}/docs/filter-dataframe-with-llm.md +5 -0
- {everyrow-0.1.10 → everyrow-0.2.0}/docs/fuzzy-join-without-keys.md +5 -0
- everyrow-0.2.0/docs/in_progress/basic-usage/notebook.ipynb +850 -0
- everyrow-0.2.0/docs/installation.mdx +306 -0
- {everyrow-0.1.10 → everyrow-0.2.0}/docs/rank-by-external-metric.md +5 -0
- {everyrow-0.1.10 → everyrow-0.2.0}/docs/reference/DEDUPE.md +7 -10
- {everyrow-0.1.10 → everyrow-0.2.0}/docs/reference/MERGE.md +7 -15
- {everyrow-0.1.10 → everyrow-0.2.0}/docs/reference/RANK.md +6 -17
- everyrow-0.1.10/docs/reference/AGENT.md → everyrow-0.2.0/docs/reference/RESEARCH.md +9 -22
- {everyrow-0.1.10 → everyrow-0.2.0}/docs/reference/SCREEN.md +7 -15
- {everyrow-0.1.10 → everyrow-0.2.0}/docs/resolve-entities-python.md +5 -0
- everyrow-0.2.0/docs/skills-vs-mcp.md +54 -0
- everyrow-0.2.0/docs-site/.gitignore +27 -0
- everyrow-0.2.0/docs-site/README.md +63 -0
- everyrow-0.2.0/docs-site/next.config.ts +10 -0
- everyrow-0.2.0/docs-site/package.json +33 -0
- everyrow-0.2.0/docs-site/pnpm-lock.yaml +2412 -0
- everyrow-0.2.0/docs-site/public/images/mcp-flow.svg +3 -0
- everyrow-0.2.0/docs-site/public/images/skills-flow.svg +3 -0
- everyrow-0.2.0/docs-site/scripts/convert-notebooks.py +53 -0
- everyrow-0.2.0/docs-site/scripts/validate-notebooks.py +106 -0
- everyrow-0.2.0/docs-site/src/app/[...slug]/page.tsx +59 -0
- everyrow-0.2.0/docs-site/src/app/globals.css +480 -0
- everyrow-0.2.0/docs-site/src/app/layout.tsx +27 -0
- everyrow-0.2.0/docs-site/src/app/notebooks/[slug]/page.tsx +47 -0
- everyrow-0.2.0/docs-site/src/app/page.tsx +116 -0
- everyrow-0.2.0/docs-site/src/app/sitemap.ts +37 -0
- everyrow-0.2.0/docs-site/src/components/DocsLayout.tsx +20 -0
- everyrow-0.2.0/docs-site/src/components/InstallationTabs.tsx +177 -0
- everyrow-0.2.0/docs-site/src/components/MDXContent.tsx +30 -0
- everyrow-0.2.0/docs-site/src/components/Sidebar.tsx +46 -0
- everyrow-0.2.0/docs-site/src/components/providers/PostHogProvider.tsx +64 -0
- everyrow-0.2.0/docs-site/src/styles/notebook.css +196 -0
- everyrow-0.2.0/docs-site/src/utils/docs.ts +142 -0
- everyrow-0.2.0/docs-site/src/utils/markdown.ts +18 -0
- everyrow-0.2.0/docs-site/src/utils/notebooks.ts +109 -0
- everyrow-0.2.0/docs-site/tsconfig.json +41 -0
- {everyrow-0.1.10 → everyrow-0.2.0}/everyrow-mcp/README.md +11 -3
- {everyrow-0.1.10 → everyrow-0.2.0}/everyrow-mcp/manifest.json +1 -1
- {everyrow-0.1.10 → everyrow-0.2.0}/everyrow-mcp/pyproject.toml +1 -1
- {everyrow-0.1.10 → everyrow-0.2.0}/everyrow-mcp/server.json +2 -2
- {everyrow-0.1.10 → everyrow-0.2.0}/everyrow-mcp/src/everyrow_mcp/server.py +6 -1
- {everyrow-0.1.10 → everyrow-0.2.0}/gemini-extension.json +1 -1
- {everyrow-0.1.10 → everyrow-0.2.0}/generate_openapi.sh +1 -1
- {everyrow-0.1.10 → everyrow-0.2.0}/pyproject.toml +7 -2
- everyrow-0.2.0/src/everyrow/__init__.py +12 -0
- {everyrow-0.1.10 → everyrow-0.2.0}/src/everyrow/api_utils.py +5 -1
- everyrow-0.2.0/src/everyrow/billing.py +29 -0
- everyrow-0.2.0/src/everyrow/constants.py +4 -0
- {everyrow-0.1.10 → everyrow-0.2.0}/src/everyrow/generated/__init__.py +1 -1
- everyrow-0.1.10/src/everyrow/generated/api/default/interrupt_chat_task_tasks_chat_interrupt_post.py → everyrow-0.2.0/src/everyrow/generated/api/artifacts/create_artifact_artifacts_post.py +38 -32
- everyrow-0.2.0/src/everyrow/generated/api/billing/__init__.py +1 -0
- everyrow-0.1.10/src/everyrow/generated/api/default/get_queues_stats_jobs_queues_get.py → everyrow-0.2.0/src/everyrow/generated/api/billing/get_billing_balance_billing_get.py +25 -17
- everyrow-0.2.0/src/everyrow/generated/api/operations/__init__.py +1 -0
- everyrow-0.1.10/src/everyrow/generated/api/default/re_execute_task_endpoint_tasks_re_execute_post.py → everyrow-0.2.0/src/everyrow/generated/api/operations/agent_map_operations_agent_map_post.py +53 -41
- everyrow-0.1.10/src/everyrow/generated/api/default/create_workflow_from_artifact_workflows_from_artifact_post.py → everyrow-0.2.0/src/everyrow/generated/api/operations/dedupe_operations_dedupe_post.py +39 -33
- everyrow-0.1.10/src/everyrow/generated/api/default/submit_task_tasks_post.py → everyrow-0.2.0/src/everyrow/generated/api/operations/merge_operations_merge_post.py +37 -29
- everyrow-0.1.10/src/everyrow/generated/api/default/copy_artifacts_artifacts_copy_post.py → everyrow-0.2.0/src/everyrow/generated/api/operations/rank_operations_rank_post.py +43 -29
- everyrow-0.1.10/src/everyrow/generated/api/default/export_to_google_sheets_export_post.py → everyrow-0.2.0/src/everyrow/generated/api/operations/screen_operations_screen_post.py +43 -31
- everyrow-0.2.0/src/everyrow/generated/api/operations/single_agent_operations_single_agent_post.py +236 -0
- everyrow-0.2.0/src/everyrow/generated/api/sessions/__init__.py +1 -0
- everyrow-0.1.10/src/everyrow/generated/api/default/copy_workflow_endpoint_workflows_copy_post.py → everyrow-0.2.0/src/everyrow/generated/api/sessions/create_session_endpoint_sessions_post.py +35 -27
- everyrow-0.2.0/src/everyrow/generated/api/tasks/__init__.py +1 -0
- everyrow-0.1.10/src/everyrow/generated/api/default/get_job_progress_for_task_jobs_progress_get.py → everyrow-0.2.0/src/everyrow/generated/api/tasks/get_task_result_tasks_task_id_result_get.py +45 -33
- everyrow-0.1.10/src/everyrow/generated/api/default/get_task_status_endpoint_tasks_task_id_status_get.py → everyrow-0.2.0/src/everyrow/generated/api/tasks/get_task_status_tasks_task_id_status_get.py +24 -42
- everyrow-0.2.0/src/everyrow/generated/models/__init__.py +97 -0
- everyrow-0.2.0/src/everyrow/generated/models/agent_map_operation.py +315 -0
- everyrow-0.1.10/src/everyrow/generated/models/artifact_group_record_analysis_type_0.py → everyrow-0.2.0/src/everyrow/generated/models/agent_map_operation_input_type_1_item.py +5 -5
- everyrow-0.2.0/src/everyrow/generated/models/agent_map_operation_input_type_2.py +46 -0
- everyrow-0.1.10/src/everyrow/generated/models/standalone_artifact_record_analysis_type_0.py → everyrow-0.2.0/src/everyrow/generated/models/agent_map_operation_response_schema_type_0.py +5 -5
- everyrow-0.1.10/src/everyrow/generated/models/create_query_params.py → everyrow-0.2.0/src/everyrow/generated/models/billing_response.py +13 -12
- everyrow-0.1.10/src/everyrow/generated/models/continue_task_request.py → everyrow-0.2.0/src/everyrow/generated/models/create_artifact_request.py +43 -43
- everyrow-0.2.0/src/everyrow/generated/models/create_artifact_request_data_type_0_item.py +46 -0
- everyrow-0.1.10/src/everyrow/generated/models/task_metadata_cols_to_rename_type_0.py → everyrow-0.2.0/src/everyrow/generated/models/create_artifact_request_data_type_1.py +5 -5
- everyrow-0.1.10/src/everyrow/generated/models/copy_artifacts_response.py → everyrow-0.2.0/src/everyrow/generated/models/create_artifact_response.py +12 -12
- everyrow-0.1.10/src/everyrow/generated/models/create_session_request.py → everyrow-0.2.0/src/everyrow/generated/models/create_session.py +6 -7
- everyrow-0.2.0/src/everyrow/generated/models/dedupe_operation.py +151 -0
- everyrow-0.2.0/src/everyrow/generated/models/dedupe_operation_input_type_1_item.py +46 -0
- everyrow-0.1.10/src/everyrow/generated/models/export_request_token_data.py → everyrow-0.2.0/src/everyrow/generated/models/dedupe_operation_input_type_2.py +5 -5
- everyrow-0.2.0/src/everyrow/generated/models/error_response.py +109 -0
- everyrow-0.1.10/src/everyrow/generated/models/task_insert_query_params.py → everyrow-0.2.0/src/everyrow/generated/models/error_response_details_type_0.py +5 -5
- {everyrow-0.1.10 → everyrow-0.2.0}/src/everyrow/generated/models/insufficient_balance_error.py +8 -0
- everyrow-0.1.10/src/everyrow/generated/models/llm_enum.py → everyrow-0.2.0/src/everyrow/generated/models/llm_enum_public.py +1 -20
- everyrow-0.2.0/src/everyrow/generated/models/merge_operation.py +278 -0
- everyrow-0.2.0/src/everyrow/generated/models/merge_operation_left_input_type_1_item.py +46 -0
- everyrow-0.1.10/src/everyrow/generated/models/chat_completion_message_tool_call.py → everyrow-0.2.0/src/everyrow/generated/models/merge_operation_left_input_type_2.py +5 -5
- everyrow-0.2.0/src/everyrow/generated/models/merge_operation_right_input_type_1_item.py +46 -0
- everyrow-0.2.0/src/everyrow/generated/models/merge_operation_right_input_type_2.py +46 -0
- everyrow-0.2.0/src/everyrow/generated/models/merge_operation_use_web_search_type_0.py +10 -0
- everyrow-0.1.10/src/everyrow/generated/models/task_status_response.py → everyrow-0.2.0/src/everyrow/generated/models/operation_response.py +43 -24
- everyrow-0.1.10/src/everyrow/generated/models/multi_agent_effort_level.py → everyrow-0.2.0/src/everyrow/generated/models/public_effort_level.py +1 -1
- everyrow-0.2.0/src/everyrow/generated/models/public_task_type.py +12 -0
- everyrow-0.2.0/src/everyrow/generated/models/rank_operation.py +203 -0
- everyrow-0.2.0/src/everyrow/generated/models/rank_operation_input_type_1_item.py +46 -0
- everyrow-0.1.10/src/everyrow/generated/models/import_request_token_data.py → everyrow-0.2.0/src/everyrow/generated/models/rank_operation_input_type_2.py +5 -5
- everyrow-0.1.10/src/everyrow/generated/models/artifact_group_record_metadata_type_0.py → everyrow-0.2.0/src/everyrow/generated/models/rank_operation_response_schema_type_0.py +5 -5
- everyrow-0.2.0/src/everyrow/generated/models/screen_operation.py +186 -0
- everyrow-0.2.0/src/everyrow/generated/models/screen_operation_input_type_1_item.py +46 -0
- everyrow-0.2.0/src/everyrow/generated/models/screen_operation_input_type_2.py +46 -0
- everyrow-0.2.0/src/everyrow/generated/models/screen_operation_response_schema_type_0.py +46 -0
- everyrow-0.1.10/src/everyrow/generated/models/create_session_response.py → everyrow-0.2.0/src/everyrow/generated/models/session_response.py +7 -8
- everyrow-0.2.0/src/everyrow/generated/models/single_agent_operation.py +304 -0
- everyrow-0.2.0/src/everyrow/generated/models/single_agent_operation_input_type_1_item.py +46 -0
- everyrow-0.2.0/src/everyrow/generated/models/single_agent_operation_input_type_2.py +46 -0
- everyrow-0.2.0/src/everyrow/generated/models/single_agent_operation_response_schema_type_0.py +46 -0
- everyrow-0.2.0/src/everyrow/generated/models/task_result_response.py +185 -0
- everyrow-0.2.0/src/everyrow/generated/models/task_result_response_data_type_0_item.py +46 -0
- everyrow-0.2.0/src/everyrow/generated/models/task_result_response_data_type_1.py +46 -0
- everyrow-0.2.0/src/everyrow/generated/models/task_status_response.py +192 -0
- everyrow-0.2.0/src/everyrow/ops.py +686 -0
- {everyrow-0.1.10 → everyrow-0.2.0}/src/everyrow/session.py +5 -7
- everyrow-0.2.0/src/everyrow/task.py +183 -0
- everyrow-0.2.0/tests/integration/__init__.py +0 -0
- everyrow-0.2.0/tests/integration/conftest.py +115 -0
- everyrow-0.2.0/tests/integration/test_agent_map.py +82 -0
- everyrow-0.2.0/tests/integration/test_dedupe.py +104 -0
- everyrow-0.2.0/tests/integration/test_merge.py +108 -0
- everyrow-0.2.0/tests/integration/test_rank.py +111 -0
- everyrow-0.2.0/tests/integration/test_screen.py +89 -0
- everyrow-0.2.0/tests/integration/test_single_agent.py +83 -0
- everyrow-0.2.0/tests/test_ops.py +593 -0
- {everyrow-0.1.10 → everyrow-0.2.0}/uv.lock +25 -1
- everyrow-0.1.10/.env.example +0 -6
- everyrow-0.1.10/docs/case_studies/basic-usage/notebook.ipynb +0 -684
- everyrow-0.1.10/src/everyrow/__init__.py +0 -5
- everyrow-0.1.10/src/everyrow/citations.py +0 -50
- everyrow-0.1.10/src/everyrow/constants.py +0 -4
- everyrow-0.1.10/src/everyrow/generated/api/default/continue_task_endpoint_tasks_continue_post.py +0 -208
- everyrow-0.1.10/src/everyrow/generated/api/default/create_api_key_endpoint_api_keys_create_post.py +0 -186
- everyrow-0.1.10/src/everyrow/generated/api/default/create_session_endpoint_sessions_create_post.py +0 -198
- everyrow-0.1.10/src/everyrow/generated/api/default/generate_feedback_endpoint_tasks_generate_feedback_post.py +0 -186
- everyrow-0.1.10/src/everyrow/generated/api/default/get_artifacts_artifacts_get.py +0 -260
- everyrow-0.1.10/src/everyrow/generated/api/default/get_default_timeout_seconds_models_default_timeout_seconds_get.py +0 -165
- everyrow-0.1.10/src/everyrow/generated/api/default/get_metrics_metrics_get.py +0 -80
- everyrow-0.1.10/src/everyrow/generated/api/default/get_user_usage_usage_get.py +0 -123
- everyrow-0.1.10/src/everyrow/generated/api/default/healthz_healthz_get.py +0 -127
- everyrow-0.1.10/src/everyrow/generated/api/default/import_from_google_sheets_import_post.py +0 -170
- everyrow-0.1.10/src/everyrow/generated/api/default/list_api_keys_endpoint_api_keys_get.py +0 -186
- everyrow-0.1.10/src/everyrow/generated/api/default/revoke_api_key_endpoint_api_keys_key_id_revoke_post.py +0 -181
- everyrow-0.1.10/src/everyrow/generated/api/default/revoke_jobs_for_task_jobs_revoke_post.py +0 -164
- everyrow-0.1.10/src/everyrow/generated/api/default/rollback_to_message_endpoint_tasks_chat_rollback_post.py +0 -186
- everyrow-0.1.10/src/everyrow/generated/api/default/submit_chat_task_tasks_chat_post.py +0 -164
- everyrow-0.1.10/src/everyrow/generated/api/default/task_resource_estimation_task_resource_estimation_post.py +0 -319
- everyrow-0.1.10/src/everyrow/generated/api/default/trigger_workflow_execution_endpoint_workflows_trigger_post.py +0 -166
- everyrow-0.1.10/src/everyrow/generated/api/default/whoami_whoami_get.py +0 -127
- everyrow-0.1.10/src/everyrow/generated/models/__init__.py +0 -281
- everyrow-0.1.10/src/everyrow/generated/models/agent_improvement_instruction.py +0 -69
- everyrow-0.1.10/src/everyrow/generated/models/agent_query_params.py +0 -383
- everyrow-0.1.10/src/everyrow/generated/models/agent_query_params_system_prompt_kind_type_0.py +0 -10
- everyrow-0.1.10/src/everyrow/generated/models/agent_task_args.py +0 -163
- everyrow-0.1.10/src/everyrow/generated/models/agent_task_args_processing_mode.py +0 -9
- everyrow-0.1.10/src/everyrow/generated/models/allowed_suggestions.py +0 -9
- everyrow-0.1.10/src/everyrow/generated/models/api_key_info.py +0 -163
- everyrow-0.1.10/src/everyrow/generated/models/artifact_changed_payload.py +0 -89
- everyrow-0.1.10/src/everyrow/generated/models/artifact_group_record.py +0 -363
- everyrow-0.1.10/src/everyrow/generated/models/artifact_group_record_trace_mapping_type_0.py +0 -46
- everyrow-0.1.10/src/everyrow/generated/models/artifact_status.py +0 -14
- everyrow-0.1.10/src/everyrow/generated/models/auto_cohort_conversation_message.py +0 -533
- everyrow-0.1.10/src/everyrow/generated/models/aux_data.py +0 -128
- everyrow-0.1.10/src/everyrow/generated/models/aux_data_source_bank.py +0 -59
- everyrow-0.1.10/src/everyrow/generated/models/chat_message_metadata.py +0 -193
- everyrow-0.1.10/src/everyrow/generated/models/concatenate_query_params.py +0 -46
- everyrow-0.1.10/src/everyrow/generated/models/concatenate_request.py +0 -306
- everyrow-0.1.10/src/everyrow/generated/models/continue_reason.py +0 -9
- everyrow-0.1.10/src/everyrow/generated/models/controller_improvement_round.py +0 -79
- everyrow-0.1.10/src/everyrow/generated/models/conversation_changed_payload.py +0 -89
- everyrow-0.1.10/src/everyrow/generated/models/copy_artifacts_request.py +0 -70
- everyrow-0.1.10/src/everyrow/generated/models/copy_workflow_request.py +0 -62
- everyrow-0.1.10/src/everyrow/generated/models/copy_workflow_response.py +0 -70
- everyrow-0.1.10/src/everyrow/generated/models/create_api_key_request.py +0 -95
- everyrow-0.1.10/src/everyrow/generated/models/create_api_key_response.py +0 -96
- everyrow-0.1.10/src/everyrow/generated/models/create_group_query_params.py +0 -61
- everyrow-0.1.10/src/everyrow/generated/models/create_group_request.py +0 -305
- everyrow-0.1.10/src/everyrow/generated/models/create_request.py +0 -305
- everyrow-0.1.10/src/everyrow/generated/models/create_workflow_from_artifact_request.py +0 -92
- everyrow-0.1.10/src/everyrow/generated/models/create_workflow_from_artifact_response.py +0 -70
- everyrow-0.1.10/src/everyrow/generated/models/data_frame_method.py +0 -18
- everyrow-0.1.10/src/everyrow/generated/models/date_cutoffs.py +0 -145
- everyrow-0.1.10/src/everyrow/generated/models/dedupe_public_params.py +0 -64
- everyrow-0.1.10/src/everyrow/generated/models/dedupe_request_params.py +0 -311
- everyrow-0.1.10/src/everyrow/generated/models/deep_merge_public_params.py +0 -143
- everyrow-0.1.10/src/everyrow/generated/models/deep_merge_request.py +0 -313
- everyrow-0.1.10/src/everyrow/generated/models/deep_rank_public_params.py +0 -109
- everyrow-0.1.10/src/everyrow/generated/models/deep_rank_request.py +0 -313
- everyrow-0.1.10/src/everyrow/generated/models/deep_screen_public_params.py +0 -132
- everyrow-0.1.10/src/everyrow/generated/models/deep_screen_request.py +0 -313
- everyrow-0.1.10/src/everyrow/generated/models/derive_expression.py +0 -69
- everyrow-0.1.10/src/everyrow/generated/models/derive_query_params.py +0 -75
- everyrow-0.1.10/src/everyrow/generated/models/derive_request.py +0 -307
- everyrow-0.1.10/src/everyrow/generated/models/document_query_tool.py +0 -12
- everyrow-0.1.10/src/everyrow/generated/models/drop_columns_query_params.py +0 -61
- everyrow-0.1.10/src/everyrow/generated/models/drop_columns_request.py +0 -305
- everyrow-0.1.10/src/everyrow/generated/models/event_type.py +0 -14
- everyrow-0.1.10/src/everyrow/generated/models/execution_metadata.py +0 -146
- everyrow-0.1.10/src/everyrow/generated/models/export_request.py +0 -75
- everyrow-0.1.10/src/everyrow/generated/models/export_to_google_sheets_export_post_response_export_to_google_sheets_export_post.py +0 -46
- everyrow-0.1.10/src/everyrow/generated/models/filter_query_params.py +0 -91
- everyrow-0.1.10/src/everyrow/generated/models/filter_request.py +0 -305
- everyrow-0.1.10/src/everyrow/generated/models/flatten_query_params.py +0 -46
- everyrow-0.1.10/src/everyrow/generated/models/flatten_request.py +0 -305
- everyrow-0.1.10/src/everyrow/generated/models/generate_feedback_request.py +0 -62
- everyrow-0.1.10/src/everyrow/generated/models/group_by_query_params.py +0 -62
- everyrow-0.1.10/src/everyrow/generated/models/group_by_request.py +0 -305
- everyrow-0.1.10/src/everyrow/generated/models/healthz_healthz_get_response_healthz_healthz_get.py +0 -46
- everyrow-0.1.10/src/everyrow/generated/models/image_chat_content_part.py +0 -80
- everyrow-0.1.10/src/everyrow/generated/models/image_chat_content_part_image_url.py +0 -46
- everyrow-0.1.10/src/everyrow/generated/models/import_from_google_sheets_import_post_response_import_from_google_sheets_import_post.py +0 -46
- everyrow-0.1.10/src/everyrow/generated/models/import_request.py +0 -83
- everyrow-0.1.10/src/everyrow/generated/models/join_query_params.py +0 -73
- everyrow-0.1.10/src/everyrow/generated/models/join_request.py +0 -305
- everyrow-0.1.10/src/everyrow/generated/models/map_agent_request_params.py +0 -313
- everyrow-0.1.10/src/everyrow/generated/models/map_multi_agent_request_params.py +0 -313
- everyrow-0.1.10/src/everyrow/generated/models/message_created_payload.py +0 -98
- everyrow-0.1.10/src/everyrow/generated/models/multi_agent_query_params.py +0 -264
- everyrow-0.1.10/src/everyrow/generated/models/multi_modal_chat_message.py +0 -160
- everyrow-0.1.10/src/everyrow/generated/models/multi_modal_chat_message_role.py +0 -10
- everyrow-0.1.10/src/everyrow/generated/models/preview_metadata.py +0 -144
- everyrow-0.1.10/src/everyrow/generated/models/processing_mode.py +0 -10
- everyrow-0.1.10/src/everyrow/generated/models/progress_status.py +0 -83
- everyrow-0.1.10/src/everyrow/generated/models/queue_stats.py +0 -77
- everyrow-0.1.10/src/everyrow/generated/models/reduce_agent_request_params.py +0 -305
- everyrow-0.1.10/src/everyrow/generated/models/reduce_multi_agent_request_params.py +0 -305
- everyrow-0.1.10/src/everyrow/generated/models/resource_estimation_response.py +0 -85
- everyrow-0.1.10/src/everyrow/generated/models/response_schema_type.py +0 -9
- everyrow-0.1.10/src/everyrow/generated/models/revoke_api_key_response.py +0 -61
- everyrow-0.1.10/src/everyrow/generated/models/rollback_to_message_request.py +0 -62
- everyrow-0.1.10/src/everyrow/generated/models/rollback_to_message_response.py +0 -77
- everyrow-0.1.10/src/everyrow/generated/models/session_changed_payload.py +0 -69
- everyrow-0.1.10/src/everyrow/generated/models/simple_chat_message.py +0 -121
- everyrow-0.1.10/src/everyrow/generated/models/simple_chat_message_role.py +0 -10
- everyrow-0.1.10/src/everyrow/generated/models/simple_chat_message_with_tool_calls.py +0 -156
- everyrow-0.1.10/src/everyrow/generated/models/source_database_entry.py +0 -92
- everyrow-0.1.10/src/everyrow/generated/models/standalone_artifact_record.py +0 -311
- everyrow-0.1.10/src/everyrow/generated/models/standalone_artifact_record_metadata_type_0.py +0 -46
- everyrow-0.1.10/src/everyrow/generated/models/standalone_artifact_record_trace_mapping_type_0.py +0 -46
- everyrow-0.1.10/src/everyrow/generated/models/status_count.py +0 -71
- everyrow-0.1.10/src/everyrow/generated/models/status_count_status.py +0 -13
- everyrow-0.1.10/src/everyrow/generated/models/submit_chat_task_body.py +0 -497
- everyrow-0.1.10/src/everyrow/generated/models/submit_chat_task_body_selected_task_type_type_0.py +0 -11
- everyrow-0.1.10/src/everyrow/generated/models/submit_task_body.py +0 -745
- everyrow-0.1.10/src/everyrow/generated/models/task_changed_payload.py +0 -105
- everyrow-0.1.10/src/everyrow/generated/models/task_effort.py +0 -10
- everyrow-0.1.10/src/everyrow/generated/models/task_id_request.py +0 -62
- everyrow-0.1.10/src/everyrow/generated/models/task_insert.py +0 -725
- everyrow-0.1.10/src/everyrow/generated/models/task_metadata.py +0 -323
- everyrow-0.1.10/src/everyrow/generated/models/task_response.py +0 -62
- everyrow-0.1.10/src/everyrow/generated/models/task_type.py +0 -31
- everyrow-0.1.10/src/everyrow/generated/models/text_chat_content_part.py +0 -74
- everyrow-0.1.10/src/everyrow/generated/models/tool_response_message.py +0 -127
- everyrow-0.1.10/src/everyrow/generated/models/toolkit_constants.py +0 -80
- everyrow-0.1.10/src/everyrow/generated/models/trace_changed_payload.py +0 -94
- everyrow-0.1.10/src/everyrow/generated/models/trace_info.py +0 -78
- everyrow-0.1.10/src/everyrow/generated/models/trigger_workflow_execution_request.py +0 -112
- everyrow-0.1.10/src/everyrow/generated/models/trigger_workflow_execution_request_task_params.py +0 -65
- everyrow-0.1.10/src/everyrow/generated/models/trigger_workflow_execution_request_task_params_additional_property.py +0 -46
- everyrow-0.1.10/src/everyrow/generated/models/trigger_workflow_execution_response.py +0 -69
- everyrow-0.1.10/src/everyrow/generated/models/upload_csv_payload.py +0 -310
- everyrow-0.1.10/src/everyrow/generated/models/upload_csv_query_params.py +0 -114
- everyrow-0.1.10/src/everyrow/generated/models/usage_response.py +0 -77
- everyrow-0.1.10/src/everyrow/generated/models/whoami_whoami_get_response_whoami_whoami_get.py +0 -46
- everyrow-0.1.10/src/everyrow/generated/models/workflow_leaf_node_input.py +0 -70
- everyrow-0.1.10/src/everyrow/ops.py +0 -760
- everyrow-0.1.10/src/everyrow/task.py +0 -230
- everyrow-0.1.10/tests/test_ops.py +0 -416
- {everyrow-0.1.10 → everyrow-0.2.0}/.gitattributes +0 -0
- {everyrow-0.1.10 → everyrow-0.2.0}/.github/workflows/ci.yaml +0 -0
- {everyrow-0.1.10 → everyrow-0.2.0}/.github/workflows/publish.yaml +0 -0
- {everyrow-0.1.10 → everyrow-0.2.0}/.github/workflows/skill-version-check.yaml +0 -0
- {everyrow-0.1.10 → everyrow-0.2.0}/LICENSE.txt +0 -0
- {everyrow-0.1.10 → everyrow-0.2.0}/docs/data/S&P 500 Companies.csv +0 -0
- {everyrow-0.1.10 → everyrow-0.2.0}/docs/data/b2b_companies.csv +0 -0
- {everyrow-0.1.10 → everyrow-0.2.0}/docs/data/case_01_crm_data.csv +0 -0
- {everyrow-0.1.10 → everyrow-0.2.0}/docs/data/companies.csv +0 -0
- {everyrow-0.1.10 → everyrow-0.2.0}/docs/data/company_info.csv +0 -0
- {everyrow-0.1.10 → everyrow-0.2.0}/docs/data/contacts_list_a.csv +0 -0
- {everyrow-0.1.10 → everyrow-0.2.0}/docs/data/contacts_list_b.csv +0 -0
- {everyrow-0.1.10 → everyrow-0.2.0}/docs/data/crm_contacts.csv +0 -0
- {everyrow-0.1.10 → everyrow-0.2.0}/docs/data/crm_funds.csv +0 -0
- {everyrow-0.1.10 → everyrow-0.2.0}/docs/data/crm_ready_contacts.csv +0 -0
- {everyrow-0.1.10 → everyrow-0.2.0}/docs/data/hn_jobs.csv +0 -0
- {everyrow-0.1.10 → everyrow-0.2.0}/docs/data/investment_firms.csv +0 -0
- {everyrow-0.1.10 → everyrow-0.2.0}/docs/data/investment_funds.csv +0 -0
- {everyrow-0.1.10 → everyrow-0.2.0}/docs/data/job_postings.csv +0 -0
- {everyrow-0.1.10 → everyrow-0.2.0}/docs/data/oil_price_margin_screen_results.csv +0 -0
- {everyrow-0.1.10 → everyrow-0.2.0}/docs/data/qualified_leads.csv +0 -0
- {everyrow-0.1.10 → everyrow-0.2.0}/docs/data/researchers.csv +0 -0
- {everyrow-0.1.10 → everyrow-0.2.0}/docs/data/saas_products.csv +0 -0
- {everyrow-0.1.10 → everyrow-0.2.0}/docs/data/texas_cities.csv +0 -0
- {everyrow-0.1.10 → everyrow-0.2.0}/docs/data/thematic_screen_results.csv +0 -0
- {everyrow-0.1.10 → everyrow-0.2.0}/docs/data/valuations.csv +0 -0
- {everyrow-0.1.10/docs/case_studies → everyrow-0.2.0/docs/in_progress}/dedupe-researchers-across-career-changes/notebook.ipynb +0 -0
- {everyrow-0.1.10 → everyrow-0.2.0}/everyrow-mcp/.mcpbignore +0 -0
- {everyrow-0.1.10 → everyrow-0.2.0}/everyrow-mcp/src/everyrow_mcp/__init__.py +0 -0
- {everyrow-0.1.10 → everyrow-0.2.0}/everyrow-mcp/src/everyrow_mcp/utils.py +0 -0
- {everyrow-0.1.10 → everyrow-0.2.0}/everyrow-mcp/tests/__init__.py +0 -0
- {everyrow-0.1.10 → everyrow-0.2.0}/everyrow-mcp/tests/conftest.py +0 -0
- {everyrow-0.1.10 → everyrow-0.2.0}/everyrow-mcp/tests/test_integration.py +0 -0
- {everyrow-0.1.10 → everyrow-0.2.0}/everyrow-mcp/tests/test_server.py +0 -0
- {everyrow-0.1.10 → everyrow-0.2.0}/everyrow-mcp/tests/test_utils.py +0 -0
- {everyrow-0.1.10 → everyrow-0.2.0}/everyrow-mcp/uv.lock +0 -0
- {everyrow-0.1.10 → everyrow-0.2.0}/images/future-search-logo-128.webp +0 -0
- {everyrow-0.1.10 → everyrow-0.2.0}/lefthook.yml +0 -0
- {everyrow-0.1.10 → everyrow-0.2.0}/openapi-python-client.yaml +0 -0
- {everyrow-0.1.10 → everyrow-0.2.0}/skills/everyrow-sdk/SKILL.md +0 -0
- {everyrow-0.1.10 → everyrow-0.2.0}/src/everyrow/generated/api/__init__.py +0 -0
- {everyrow-0.1.10/src/everyrow/generated/api/default → everyrow-0.2.0/src/everyrow/generated/api/artifacts}/__init__.py +0 -0
- {everyrow-0.1.10 → everyrow-0.2.0}/src/everyrow/generated/client.py +0 -0
- {everyrow-0.1.10 → everyrow-0.2.0}/src/everyrow/generated/errors.py +0 -0
- {everyrow-0.1.10 → everyrow-0.2.0}/src/everyrow/generated/models/http_validation_error.py +0 -0
- {everyrow-0.1.10 → everyrow-0.2.0}/src/everyrow/generated/models/task_status.py +0 -0
- {everyrow-0.1.10 → everyrow-0.2.0}/src/everyrow/generated/models/validation_error.py +0 -0
- {everyrow-0.1.10 → everyrow-0.2.0}/src/everyrow/generated/py.typed +0 -0
- {everyrow-0.1.10 → everyrow-0.2.0}/src/everyrow/generated/types.py +0 -0
- {everyrow-0.1.10 → everyrow-0.2.0}/src/everyrow/result.py +0 -0
- {everyrow-0.1.10 → everyrow-0.2.0}/tests/__init__.py +0 -0
- {everyrow-0.1.10 → everyrow-0.2.0}/tests/test_version.py +0 -0
{everyrow-0.1.10 → everyrow-0.2.0}/.claude-plugin/marketplace.json
RENAMED
@@ -11,7 +11,7 @@
       "name": "everyrow",
       "source": "./",
       "description": "Claude Code plugin for the everyrow SDK - AI-powered data processing utilities for transforming, deduping, merging, ranking, and screening dataframes",
-      "version": "0.1.10"
+      "version": "0.2.0"
     }
   ]
 }

{everyrow-0.1.10 → everyrow-0.2.0}/.claude-plugin/plugin.json
RENAMED
@@ -1,7 +1,7 @@
 {
   "name": "everyrow",
   "description": "Claude Code plugin for the everyrow SDK - AI-powered data processing utilities for transforming, deduping, merging, ranking, and screening dataframes",
-  "version": "0.1.10",
+  "version": "0.2.0",
   "author": {
     "name": "FutureSearch"
   },

everyrow-0.2.0/.github/workflows/deploy-docs.yaml
@@ -0,0 +1,92 @@
+name: Deploy Docs Site
+
+on:
+  workflow_dispatch:
+    inputs:
+      deploy_production:
+        description: "Deploy to production"
+        type: boolean
+        required: false
+        default: true
+  push:
+    branches: ["main"]
+    paths:
+      - "docs/**"
+      - "docs-site/**"
+      - ".github/workflows/deploy-docs.yaml"
+  pull_request:
+    paths:
+      - "docs/**"
+      - "docs-site/**"
+      - ".github/workflows/deploy-docs.yaml"
+
+env:
+  # GCS bucket for static docs files (e.g., "everyrow-docs")
+  # Traefik needs to route everyrow.io/docs/* to this bucket
+  DOCS_BUCKET: ${{ vars.DOCS_BUCKET }}
+
+permissions:
+  contents: read
+  id-token: write
+
+concurrency:
+  group: ${{ github.workflow }}-${{ github.ref }}
+  cancel-in-progress: true
+
+jobs:
+  build-and-deploy:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v4
+
+      # --- Python setup for notebook conversion ---
+      - name: Install uv
+        uses: astral-sh/setup-uv@v5
+
+      - name: Set up Python
+        run: uv python install 3.12
+
+      # TODO: Re-enable after fixing notebook descriptions (see PR #91)
+      # - name: Validate notebook structure
+      #   run: python docs-site/scripts/validate-notebooks.py
+
+      - name: Convert notebooks to HTML
+        run: uv run --group case-studies python docs-site/scripts/convert-notebooks.py
+
+      # --- Node.js setup for docs site ---
+      - name: Install pnpm
+        uses: pnpm/action-setup@v4
+        with:
+          version: 10
+
+      - name: Set up Node.js
+        uses: actions/setup-node@v4
+        with:
+          node-version: "22"
+          cache: "pnpm"
+          cache-dependency-path: "docs-site/pnpm-lock.yaml"
+
+      - name: Install dependencies
+        working-directory: ./docs-site
+        run: pnpm install --frozen-lockfile
+
+      - name: Build static site
+        working-directory: ./docs-site
+        run: pnpm build
+
+      - name: Log in to Google Cloud
+        if: ${{ github.ref == 'refs/heads/main' || inputs.deploy_production }}
+        uses: google-github-actions/auth@v2
+        with:
+          credentials_json: ${{ secrets.GCP_CREDENTIALS_GLOBAL }}
+
+      - name: Set up Cloud SDK
+        if: ${{ github.ref == 'refs/heads/main' || inputs.deploy_production }}
+        uses: google-github-actions/setup-gcloud@v2
+
+      - name: Deploy docs site to GCS
+        if: ${{ github.ref == 'refs/heads/main' || inputs.deploy_production }}
+        working-directory: ./docs-site
+        run: |
+          gsutil -m -h "Cache-Control:max-age=300" rsync -r -d ./out/ gs://${{ env.DOCS_BUCKET }}

everyrow-0.2.0/.github/workflows/docs-sync-check.yml
@@ -0,0 +1,81 @@
+name: Docs Sync Check
+
+# LLM AGENTS: Installation instructions must stay in sync across:
+# - README.md (main SDK readme)
+# - everyrow-mcp/README.md (MCP server readme)
+# - docs/installation.md (consolidated installation guide)
+
+on:
+  pull_request:
+    types: [opened, synchronize, reopened]
+
+jobs:
+  check-docs-sync:
+    runs-on: ubuntu-latest
+    permissions:
+      contents: read
+      pull-requests: read
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v4
+        with:
+          fetch-depth: 0
+
+      - name: Check if docs files changed
+        id: check-files
+        run: |
+          # Get list of changed files in this PR
+          CHANGED_FILES=$(git diff --name-only origin/${{ github.base_ref }}...HEAD)
+
+          # Check if any README.md or docs/ files changed
+          MATCHED_FILES=""
+          for file in $CHANGED_FILES; do
+            if [[ "$file" == *"README.md" ]] || [[ "$file" == docs/* ]]; then
+              MATCHED_FILES="$MATCHED_FILES $file"
+            fi
+          done
+
+          if [ -n "$MATCHED_FILES" ]; then
+            echo "found=true" >> $GITHUB_OUTPUT
+            echo "files=$MATCHED_FILES" >> $GITHUB_OUTPUT
+            echo "Found docs files in changed files:$MATCHED_FILES"
+          else
+            echo "found=false" >> $GITHUB_OUTPUT
+            echo "No docs files changed"
+          fi
+
+      - name: Verify docs consistency with Claude
+        if: steps.check-files.outputs.found == 'true'
+        uses: anthropics/claude-code-action@v1
+        with:
+          claude_code_oauth_token: ${{ secrets.CLAUDE_CODE_OAUTH_TOKEN }}
+          github_token: ${{ secrets.GITHUB_TOKEN }}
+          prompt: |
+            TASK: Verify installation instructions are consistent (pass/fail CI check)
+
+            Changed docs files: ${{ steps.check-files.outputs.files }}
+
+            Files that must stay in sync:
+
+            1. README.md - "Coding agent plugins" section (Claude Code, Gemini CLI, Codex CLI, Cursor)
+            2. everyrow-mcp/README.md - "Installation" section (Claude Desktop, Manual Config)
+            3. docs/installation.md - Contains both SDK and MCP installation instructions
+
+            What must be consistent:
+            - Claude Code plugin commands must match
+            - Gemini CLI commands and settings must match
+            - Codex CLI commands must match
+            - Cursor setup steps must match
+            - MCP server config (uvx/everyrow-mcp) must match
+            - Claude Desktop .mcpb installation instructions must match
+            - API key setup instructions must match
+
+            INSTRUCTIONS:
+            1. Read all three files listed above
+            2. Compare the installation instructions in each
+            3. Verify commands and configurations are identical where they overlap
+            4. If consistent: exit 0
+            5. If inconsistent: exit 1 and explain what differs
+          claude_args: |
+            --model haiku
+            --allowedTools "Read,Bash(exit:*)"

everyrow-0.2.0/.github/workflows/integration-tests.yml
@@ -0,0 +1,32 @@
+name: Integration Tests
+
+on:
+  push:
+    branches: [main]
+  workflow_dispatch:
+
+jobs:
+  integration-tests:
+    runs-on: ubuntu-latest
+
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v4
+
+      - name: Set up Python
+        uses: actions/setup-python@v5
+        with:
+          python-version: "3.12"
+
+      - name: Install uv
+        uses: astral-sh/setup-uv@v4
+
+      - name: Install dependencies
+        run: uv sync
+
+      - name: Run integration tests
+        env:
+          EVERYROW_API_KEY: ${{ secrets.EVERYROW_API_KEY }}
+        run: |
+          uv run pip install pytest-xdist
+          uv run pytest -n 8 tests/integration/ -v -m integration --tb=short

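Note: the workflow above selects tests with `-m integration` and injects `EVERYROW_API_KEY` from repository secrets. As a rough illustration of what such a test can look like (the real suite lives in `tests/integration/`; the marker and skip logic below are assumptions, not copied from it):

```python
# Hypothetical sketch of a test compatible with `pytest -m integration`.
# Only the env-var name and the marker come from the workflow above.
import os

import pytest

pytestmark = pytest.mark.integration  # picked up by `pytest -m integration`


@pytest.mark.skipif("EVERYROW_API_KEY" not in os.environ, reason="EVERYROW_API_KEY not set")
def test_api_key_is_configured():
    # CI exposes the key via secrets.EVERYROW_API_KEY; locally use `uv run --env-file .env`.
    assert os.environ["EVERYROW_API_KEY"].strip()
```
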
everyrow-0.2.0/CITATION.cff
@@ -0,0 +1,22 @@
+cff-version: 1.2.0
+message: "If you use this software, please cite it as below."
+type: software
+title: "everyrow"
+abstract: "Screen, rank, dedupe, and merge dataframes using natural language. Run web agents to research every row."
+license: MIT
+version: 0.2.0
+date-released: 2026-01-26
+repository-code: "https://github.com/futuresearch/everyrow-sdk"
+url: "https://everyrow.io"
+keywords:
+  - data-processing
+  - pandas
+  - dataframe
+  - natural-language
+  - ai
+  - deduplication
+  - data-merging
+  - web-agents
+authors:
+  - name: "FutureSearch"
+    website: "https://futuresearch.ai"

{everyrow-0.1.10 → everyrow-0.2.0}/PKG-INFO
RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: everyrow
-Version: 0.1.10
+Version: 0.2.0
 Summary: An SDK for everyrow.io: agent ops at spreadsheet scale
 License-File: LICENSE.txt
 Requires-Python: >=3.12
@@ -213,7 +213,7 @@ result = await agent_map(
 print(result.data.head())
 ```
 
-**More:** [docs](docs/
+**More:** [docs](docs/reference/RESEARCH.md) / [basic usage](docs/case_studies/basic-usage/notebook.ipynb)
 
 ### Derive
 
@@ -355,11 +355,12 @@ lefthook install
 ```
 
 ```bash
-uv run pytest
-uv run
-uv run ruff
-uv run
-
+uv run pytest # unit tests
+uv run --env-file .env pytest -m integration # integration tests (requires EVERYROW_API_KEY)
+uv run ruff check . # lint
+uv run ruff format . # format
+uv run basedpyright # type check
+./generate_openapi.sh # regenerate client
 ```
 
 ---
@@ -370,4 +371,17 @@ Built by [FutureSearch](https://futuresearch.ai). We kept running into the same
 
 [everyrow.io](https://everyrow.io) (app/dashboard) · [case studies](https://futuresearch.ai/solutions/) · [research](https://futuresearch.ai/research/)
 
-
+**Citing everyrow:** If you use this software in your research, please cite it using the metadata in [CITATION.cff](CITATION.cff) or the BibTeX below:
+
+```bibtex
+@software{everyrow,
+  author = {FutureSearch},
+  title = {everyrow},
+  url = {https://github.com/futuresearch/everyrow-sdk},
+  version = {0.2.0},
+  year = {2026},
+  license = {MIT}
+}
+```
+
+**License** MIT license. See [LICENSE.txt](LICENSE.txt).

{everyrow-0.1.10 → everyrow-0.2.0}/README.md
RENAMED
@@ -200,7 +200,7 @@ result = await agent_map(
 print(result.data.head())
 ```
 
-**More:** [docs](docs/
+**More:** [docs](docs/reference/RESEARCH.md) / [basic usage](docs/case_studies/basic-usage/notebook.ipynb)
 
 ### Derive
 
@@ -342,11 +342,12 @@ lefthook install
 ```
 
 ```bash
-uv run pytest
-uv run
-uv run ruff
-uv run
-
+uv run pytest # unit tests
+uv run --env-file .env pytest -m integration # integration tests (requires EVERYROW_API_KEY)
+uv run ruff check . # lint
+uv run ruff format . # format
+uv run basedpyright # type check
+./generate_openapi.sh # regenerate client
 ```
 
 ---
@@ -357,4 +358,17 @@ Built by [FutureSearch](https://futuresearch.ai). We kept running into the same
 
 [everyrow.io](https://everyrow.io) (app/dashboard) · [case studies](https://futuresearch.ai/solutions/) · [research](https://futuresearch.ai/research/)
 
-
+**Citing everyrow:** If you use this software in your research, please cite it using the metadata in [CITATION.cff](CITATION.cff) or the BibTeX below:
+
+```bibtex
+@software{everyrow,
+  author = {FutureSearch},
+  title = {everyrow},
+  url = {https://github.com/futuresearch/everyrow-sdk},
+  version = {0.2.0},
+  year = {2026},
+  license = {MIT}
+}
+```
+
+**License** MIT license. See [LICENSE.txt](LICENSE.txt).

{everyrow-0.1.10 → everyrow-0.2.0}/docs/add-column-web-lookup.md
RENAMED
@@ -1,3 +1,8 @@
+---
+title: How to Add A Column to a DataFrame with Web Research
+description: Step-by-step guide to enriching a pandas DataFrame with new columns using LLM-powered web research agents to find and add any data.
+---
+
 # How to Add a Column to a DataFrame Using Web Lookup
 
 `pandas.apply()` runs a local function on each row. But it can't use LLM judgment or do web research to find new values. And doing this by hand can be very slow or expensive. EveryRow provides a one-line utility to do this cheaply and at scale.

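For orientation, a minimal sketch of that idea using the `agent_map` call that appears in the README hunks above; the `task` keyword, the exact signature, and the use of this particular CSV are assumptions, not documented API:

```python
# Hedged sketch only: `agent_map` and `result.data` appear in the README diff,
# but the keyword argument shown here is illustrative.
import asyncio

import pandas as pd

from everyrow.ops import agent_map  # import path assumed from src/everyrow/ops.py

cities = pd.read_csv("docs/data/texas_cities.csv")  # sample data shipped with the repo


async def main() -> None:
    result = await agent_map(
        cities,
        task="Add a column with each city's current population, citing an official source.",
    )
    print(result.data.head())  # result.data is a DataFrame, as in the README example


asyncio.run(main())
```
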
{everyrow-0.1.10 → everyrow-0.2.0}/docs/case_studies/dedupe-crm-company-records/notebook.ipynb
RENAMED
@@ -3,11 +3,7 @@
   {
    "cell_type": "markdown",
    "metadata": {},
-   "source": [
-    "# CRM Data Deduplication with everyrow SDK\n",
-    "\n",
-    "This notebook demonstrates how to use the everyrow SDK's `dedupe` operation to deduplicate messy CRM data using AI-powered semantic matching."
-   ]
+   "source": "# How to use LLMs to deduplicate CRM Data\n\nThis notebook demonstrates how to use the everyrow SDK's `dedupe` operation to deduplicate messy CRM data using AI-powered semantic matching."
   },
   {
    "cell_type": "markdown",
@@ -605,8 +601,11 @@
    "pygments_lexer": "ipython3",
    "version": "3.12.11"
   },
-  "language_version": "3.12"
+  "language_version": "3.12",
+  "everyrow": {
+   "description": "Python notebook cleaning 500 CRM records with inconsistent company names, missing contacts, and partial email matches. Uses everyrow's dedupe() with a plain-English equivalence relation to find and group semantic duplicates."
+  }
  },
  "nbformat": 4,
  "nbformat_minor": 4
-}
+}

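The metadata added above describes `dedupe()` driven by a plain-English equivalence relation. A hedged sketch of how such a call might look (the keyword name `equivalence_relation` is an assumption; see docs/reference/DEDUPE.md for the actual API):

```python
# Illustrative only: dedupe() lives in everyrow.ops, but the keyword shown here is assumed.
import asyncio

import pandas as pd

from everyrow.ops import dedupe  # import path assumed

crm = pd.read_csv("docs/data/case_01_crm_data.csv")  # CRM sample used by the notebook


async def main() -> None:
    result = await dedupe(
        crm,
        equivalence_relation=(
            "Two rows refer to the same company even if the name, contact, "
            "or email domain is formatted differently."
        ),
    )
    print(result.data.head())


asyncio.run(main())
```
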
{everyrow-0.1.10 → everyrow-0.2.0}/docs/case_studies/match-software-vendors-to-requirements/notebook.ipynb
RENAMED
@@ -3,17 +3,7 @@
   {
    "cell_type": "markdown",
    "metadata": {},
-   "source": [
-    "# everyrow.io/merge Tutorial\n",
-    "\n",
-    "This notebook demonstrates the [everyrow.io SDK](https://github.com/futuresearch/everyrow-sdk) merge capabilities:\n",
-    "\n",
-    "1. **Fuzzy String Matching** - Handling typos and corrupted data\n",
-    "2. **LLM Merge** - Matching without common columns (company ↔ ticker)\n",
-    "3. **Web Merge** - Dynamic data requiring real-time verification (CEO matching)\n",
-    "\n",
-    "The SDK implements a cascade: **Exact → Fuzzy → LLM → Web**, using the simplest method that works."
-   ]
+   "source": "# Fuzzy join two Pandas DataFrames using LLMs\n\nThis notebook demonstrates the [everyrow.io SDK](https://github.com/futuresearch/everyrow-sdk) merge capabilities:\n\n1. **Fuzzy String Matching** - Handling typos and corrupted data\n2. **LLM Merge** - Matching without common columns (company ↔ ticker)\n3. **Web Merge** - Dynamic data requiring real-time verification (CEO matching)\n\nThe SDK implements a cascade: **Exact → Fuzzy → LLM → Web**, using the simplest method that works."
   },
   {
    "cell_type": "markdown",
@@ -481,6 +471,9 @@
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
    "version": "3.13.9"
+   },
+   "everyrow": {
+    "description": "Python notebook testing everyrow's merge cascade at 0%, 5%, and 10% name corruption on 438 S&P 500 companies. Demonstrates exact, fuzzy, LLM, and web matching for company names, tickers, and CEO data."
   }
  },
  "nbformat": 4,

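A rough sketch of the merge cascade described in that notebook header; the keyword names (`left`, `right`, `instructions`, `use_web_search`) are guesses loosely based on the generated MergeOperation model fields added in 0.2.0, not a documented signature:

```python
# Hedged sketch of merge(); only the cascade idea (exact → fuzzy → LLM → web) comes from the notebook.
import asyncio

import pandas as pd

from everyrow.ops import merge  # import path assumed

companies = pd.read_csv("docs/data/companies.csv")
valuations = pd.read_csv("docs/data/valuations.csv")


async def main() -> None:
    result = await merge(
        left=companies,
        right=valuations,
        instructions=(
            "Match each company to its ticker, tolerating typos and abbreviations; "
            "only fall back to web search when text matching is ambiguous."
        ),
        use_web_search=True,  # MergeOperation gained a use_web_search field in this release
    )
    print(result.data.head())


asyncio.run(main())
```
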
{everyrow-0.1.10 → everyrow-0.2.0}/docs/case_studies/merge-contacts-with-company-data/notebook.ipynb
RENAMED
@@ -3,15 +3,7 @@
   {
    "cell_type": "markdown",
    "metadata": {},
-   "source": [
-    "# CRM Merge Workflow\n",
-    "\n",
-    "This notebook demonstrates using everyrow's `merge()` utility to join contact-level data with organization-level data before CRM upload.\n",
-    "\n",
-    "**Use Case:** Your data lives across multiple tables—contacts in one, company information in another. Before uploading to HubSpot/Salesforce, you need a flattened export where each contact row includes the associated company context.\n",
-    "\n",
-    "**Why everyrow?** Company names may not match exactly between tables (\"Acme Corp\" vs \"Acme Corporation\" vs \"ACME\"). The `merge()` function handles these variations semantically."
-   ]
+   "source": "# How to merge datasets without common ID in Python\n\nThis notebook demonstrates using everyrow's `merge()` utility to join contact-level data with organization-level data before CRM upload.\n\n**Use Case:** Your data lives across multiple tables—contacts in one, company information in another. Before uploading to HubSpot/Salesforce, you need a flattened export where each contact row includes the associated company context.\n\n**Why everyrow?** Company names may not match exactly between tables (\"Acme Corp\" vs \"Acme Corporation\" vs \"ACME\"). The `merge()` function handles these variations semantically."
   },
   {
    "cell_type": "code",
@@ -942,6 +934,9 @@
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
    "version": "3.14.2"
+   },
+   "everyrow": {
+    "description": "Python notebook joining a contacts table with a funds table for CRM upload. Resolves name variations like 'Citadel LLC' to 'Citadel' and 'D.E. Shaw' to 'D. E. Shaw & Co.' using everyrow's semantic merge."
   }
  },
  "nbformat": 4,

{everyrow-0.1.10 → everyrow-0.2.0}/docs/case_studies/merge-overlapping-contact-lists/notebook.ipynb
RENAMED
@@ -3,15 +3,7 @@
   {
    "cell_type": "markdown",
    "metadata": {},
-   "source": [
-    "# Merge Candidate Contact Lists\n",
-    "\n",
-    "This notebook demonstrates using everyrow's `merge()` utility to combine two overlapping contact lists where records lack exact matches.\n",
-    "\n",
-    "**Use Case:** You have candidate lists from two different sources and need to merge them to avoid sending duplicate recruiting emails. The challenge: less than 50% match exactly by name or email due to typos, nicknames, different email domains, and incomplete data.\n",
-    "\n",
-    "**Why everyrow?** Traditional approaches (VLOOKUP, fuzzy matching) fail on semantic variations. everyrow's `merge()` uses LLM-powered matching to intelligently identify duplicates despite significant data variations."
-   ]
+   "source": "# Fuzzy match and merge contact lists in Python\n\nThis notebook demonstrates using everyrow's `merge()` utility to combine two overlapping contact lists where records lack exact matches.\n\n**Use Case:** You have candidate lists from two different sources and need to merge them to avoid sending duplicate recruiting emails. The challenge: less than 50% match exactly by name or email due to typos, nicknames, different email domains, and incomplete data.\n\n**Why everyrow?** Traditional approaches (VLOOKUP, fuzzy matching) fail on semantic variations. everyrow's `merge()` uses LLM-powered matching to intelligently identify duplicates despite significant data variations."
   },
   {
    "cell_type": "code",
@@ -920,6 +912,9 @@
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
    "version": "3.14.2"
+   },
+   "everyrow": {
+    "description": "Python notebook combining two overlapping contact lists where fewer than 50% match exactly. Resolves nicknames (Bob/Robert), initials (S. Chen/Sarah Chen), and institutional variations using LLM-powered matching."
   }
  },
  "nbformat": 4,

{everyrow-0.1.10 → everyrow-0.2.0}/docs/case_studies/multi-stage-lead-qualification/notebook.ipynb
RENAMED
@@ -3,19 +3,7 @@
   {
    "cell_type": "markdown",
    "metadata": {},
-   "source": [
-    "# Multi-Stage Lead Screening Workflow\n",
-    "\n",
-    "This notebook demonstrates a **complex, multi-stage screening workflow** that combines multiple everyrow operations with pandas data transformations.\n",
-    "\n",
-    "**Use Case:** Qualify investment fund leads for a B2B research tools company. The workflow:\n",
-    "1. Score funds by \"contrarian\" research approach (likely to adopt new tools)\n",
-    "2. Filter to high-scoring candidates using pandas\n",
-    "3. Research team sizes for remaining candidates\n",
-    "4. Apply nuanced inclusion logic: include funds with strong research signals OR very small teams\n",
-    "\n",
-    "**Why this approach?** Traditional tools force binary choices. This workflow captures the nuanced mental model: \"I want funds that show research-tool-adoption signals, but I'll also include tiny funds where even weak signals matter.\""
-   ]
+   "source": "# Build an AI lead qualification pipeline in Python\n\nThis notebook demonstrates a **complex, multi-stage screening workflow** that combines multiple everyrow operations with pandas data transformations.\n\n**Use Case:** Qualify investment fund leads for a B2B research tools company. The workflow:\n1. Score funds by \"contrarian\" research approach (likely to adopt new tools)\n2. Filter to high-scoring candidates using pandas\n3. Research team sizes for remaining candidates\n4. Apply nuanced inclusion logic: include funds with strong research signals OR very small teams\n\n**Why this approach?** Traditional tools force binary choices. This workflow captures the nuanced mental model: \"I want funds that show research-tool-adoption signals, but I'll also include tiny funds where even weak signals matter.\""
   },
   {
    "cell_type": "code",
@@ -933,6 +921,9 @@
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
    "version": "3.14.2"
+   },
+   "everyrow": {
+    "description": "Python notebook building a four-stage pipeline: score funds by research-tool adoption, filter by threshold with pandas, estimate team sizes via web research, then apply nuanced inclusion logic combining both signals."
   }
  },
  "nbformat": 4,

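The four-stage pipeline described in that notebook alternates everyrow calls with ordinary pandas steps. A condensed, hypothetical sketch (the `criteria` keyword and the `score` column name are assumptions, not copied from the notebook):

```python
# Hedged sketch of a rank → pandas filter → screen pipeline; structure only, not the real code.
import asyncio

import pandas as pd

from everyrow.ops import rank, screen  # import paths assumed

funds = pd.read_csv("docs/data/investment_funds.csv")


async def main() -> None:
    # Stage 1: score funds on research-tool-adoption signals.
    scored = await rank(funds, criteria="Evidence of a contrarian, research-heavy investment approach")
    df = scored.data

    # Stage 2: plain pandas threshold filter (column name assumed).
    shortlist = df[df["score"] >= df["score"].median()]

    # Stages 3-4: research team sizes on the web, then keep strong signals OR very small teams.
    final = await screen(
        shortlist,
        criteria="Keep funds with strong research signals, or teams of roughly five people or fewer.",
    )
    print(final.data.head())


asyncio.run(main())
```
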
{everyrow-0.1.10 → everyrow-0.2.0}/docs/case_studies/research-and-rank-permit-times/notebook.ipynb
RENAMED
@@ -3,15 +3,7 @@
   {
    "cell_type": "markdown",
    "metadata": {},
-   "source": [
-    "# Texas Permit Processing Times\n",
-    "\n",
-    "This notebook demonstrates using everyrow's `rank()` utility with **web research capabilities** to gather and rank real-world data that isn't available in a structured format.\n",
-    "\n",
-    "**Use Case:** Real estate investors need permit processing timelines to evaluate markets—delays directly impact holding costs. But municipalities publish this data inconsistently: some on websites, some in PDFs, some not at all.\n",
-    "\n",
-    "**Why everyrow?** The `rank()` function can perform web research to find permit processing times from official sources, contractor reports, and comparable city data—then rank cities by speed."
-   ]
+   "source": "# Use LLM Agents to research government data at scale\n\nThis notebook demonstrates using everyrow's `rank()` utility with **web research capabilities** to gather and rank real-world data that isn't available in a structured format.\n\n**Use Case:** Real estate investors need permit processing timelines to evaluate markets—delays directly impact holding costs. But municipalities publish this data inconsistently: some on websites, some in PDFs, some not at all.\n\n**Why everyrow?** The `rank()` function can perform web research to find permit processing times from official sources, contractor reports, and comparable city data—then rank cities by speed."
   },
   {
    "cell_type": "code",
@@ -795,6 +787,9 @@
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
    "version": "3.14.2"
+   },
+   "everyrow": {
+    "description": "Python notebook where LLM agents research residential permit processing times across 30 Texas cities. Agents gather data from official municipal websites, contractor reports, and comparable city estimates, then rank by speed."
   }
  },
  "nbformat": 4,

{everyrow-0.1.10 → everyrow-0.2.0}/docs/case_studies/score-leads-from-fragmented-data/notebook.ipynb
RENAMED
@@ -3,15 +3,7 @@
   {
    "cell_type": "markdown",
    "metadata": {},
-   "source": [
-    "# Lead Scoring: Data Fragmentation Risk\n",
-    "\n",
-    "This notebook demonstrates using everyrow's `rank()` utility to score B2B leads by their likelihood of suffering from data fragmentation challenges.\n",
-    "\n",
-    "**Use Case:** A data integration SaaS company wants to prioritize leads. Companies operating across multiple locations, entities, or point solutions are more likely to need data integration tools.\n",
-    "\n",
-    "**Why everyrow?** Traditional enrichment tools provide data fields but can't interpret them. Manual review of 1,000 leads is prohibitively slow. everyrow's `rank()` analyzes each company's operational complexity semantically."
-   ]
+   "source": "# How to score and prioritize leads with AI in Python\n\nThis notebook demonstrates using everyrow's `rank()` utility to score B2B leads by their likelihood of suffering from data fragmentation challenges.\n\n**Use Case:** A data integration SaaS company wants to prioritize leads. Companies operating across multiple locations, entities, or point solutions are more likely to need data integration tools.\n\n**Why everyrow?** Traditional enrichment tools provide data fields but can't interpret them. Manual review of 1,000 leads is prohibitively slow. everyrow's `rank()` analyzes each company's operational complexity semantically."
   },
   {
    "cell_type": "code",
@@ -722,6 +714,9 @@
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
    "version": "3.14.2"
+   },
+   "everyrow": {
+    "description": "Python notebook scoring 20 B2B companies by data fragmentation risk. LLM agents analyze each company's operational complexity, M&A history, multi-location operations, and system diversity to prioritize sales outreach."
   }
  },
  "nbformat": 4,

{everyrow-0.1.10 → everyrow-0.2.0}/docs/case_studies/score-leads-without-crm-history/notebook.ipynb
RENAMED
@@ -3,15 +3,7 @@
   {
    "cell_type": "markdown",
    "metadata": {},
-   "source": [
-    "# Lead Scoring Without CRM\n",
-    "\n",
-    "This notebook demonstrates using everyrow's `rank()` utility to score investment firms by their likelihood to purchase research tools—without needing CRM data or prior interactions.\n",
-    "\n",
-    "**Use Case:** A research tools company wants to rank investment firms by product fit. Traditional approaches either require expensive CRM integrations or burn credits on enrichment tools that provide data without interpretation.\n",
-    "\n",
-    "**Why everyrow?** The `rank()` function can analyze public information (website descriptions, investment focus, team characteristics) and provide both a score AND reasoning for why a firm was scored a certain way."
-   ]
+   "source": "# Score and rank leads without a CRM in Python\n\nThis notebook demonstrates using everyrow's `rank()` utility to score investment firms by their likelihood to purchase research tools—without needing CRM data or prior interactions.\n\n**Use Case:** A research tools company wants to rank investment firms by product fit. Traditional approaches either require expensive CRM integrations or burn credits on enrichment tools that provide data without interpretation.\n\n**Why everyrow?** The `rank()` function can analyze public information (website descriptions, investment focus, team characteristics) and provide both a score AND reasoning for why a firm was scored a certain way."
   },
   {
    "cell_type": "code",
@@ -732,6 +724,9 @@
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
    "version": "3.14.2"
+   },
+   "everyrow": {
+    "description": "Python notebook ranking 15 investment firms by research-tool purchase likelihood using only public descriptions. No CRM data needed—LLM agents analyze strategy, team size, and research intensity to generate scores."
   }
  },
  "nbformat": 4,

{everyrow-0.1.10 → everyrow-0.2.0}/docs/case_studies/screen-job-postings-by-criteria/notebook.ipynb
RENAMED
@@ -3,18 +3,7 @@
   {
    "cell_type": "markdown",
    "metadata": {},
-   "source": [
-    "# Job Posting Screening\n",
-    "\n",
-    "This notebook demonstrates using everyrow's `screen()` utility to filter job postings by semantic criteria that traditional regex/keyword matching struggles with.\n",
-    "\n",
-    "**Use Case:** Filter job postings from a \"Who's Hiring\" thread to find only those that meet ALL of:\n",
-    "1. Remote-friendly (explicitly allows remote/hybrid/distributed work)\n",
-    "2. Senior-level (title or requirements indicate 5+ years experience)\n",
-    "3. Salary disclosed (specific compensation figures, not \"competitive\" or \"DOE\")\n",
-    "\n",
-    "**Why everyrow?** Traditional keyword matching achieves ~68% precision on this task. Semantic screening with everyrow achieves >90% precision by understanding context and intent."
-   ]
+   "source": "# How to filter job postings with LLM Agents\n\nThis notebook demonstrates using everyrow's `screen()` utility to filter job postings by semantic criteria that traditional regex/keyword matching struggles with.\n\n**Use Case:** Filter job postings from a \"Who's Hiring\" thread to find only those that meet ALL of:\n1. Remote-friendly (explicitly allows remote/hybrid/distributed work)\n2. Senior-level (title or requirements indicate 5+ years experience)\n3. Salary disclosed (specific compensation figures, not \"competitive\" or \"DOE\")\n\n**Why everyrow?** Traditional keyword matching achieves ~68% precision on this task. Semantic screening with everyrow achieves >90% precision by understanding context and intent."
   },
   {
    "cell_type": "code",
@@ -518,6 +507,9 @@
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
    "version": "3.14.2"
+   },
+   "everyrow": {
+    "description": "Python notebook filtering job postings by three semantic criteria at once: remote-friendly, senior-level, and salary disclosed. LLM agents evaluate context and intent, achieving over 90% precision versus 68% from keyword matching."
   }
  },
  "nbformat": 4,

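For reference, a hedged sketch of a `screen()` call matching the three job-posting criteria above; the `criteria` keyword is an assumption (see docs/reference/SCREEN.md for the real API):

```python
# Illustrative only; the keyword argument is assumed, not documented by this diff.
import asyncio

import pandas as pd

from everyrow.ops import screen  # import path assumed

jobs = pd.read_csv("docs/data/hn_jobs.csv")  # "Who's Hiring" sample data in the repo


async def main() -> None:
    result = await screen(
        jobs,
        criteria=(
            "Keep postings that explicitly allow remote work, target senior "
            "engineers (5+ years), and disclose a concrete salary figure."
        ),
    )
    print(result.data.head())


asyncio.run(main())
```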