earningscall 1.2.0.tar.gz → 1.2.1.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (73)
  1. {earningscall-1.2.0 → earningscall-1.2.1}/CHANGELOG.md +4 -0
  2. {earningscall-1.2.0 → earningscall-1.2.1}/PKG-INFO +15 -1
  3. {earningscall-1.2.0 → earningscall-1.2.1}/README.md +14 -0
  4. {earningscall-1.2.0 → earningscall-1.2.1}/earningscall/company.py +4 -1
  5. {earningscall-1.2.0 → earningscall-1.2.1}/pyproject.toml +1 -1
  6. earningscall-1.2.1/tests/data/nvda-q2-2025-level-4-data-missing.yaml +679 -0
  7. {earningscall-1.2.0 → earningscall-1.2.1}/tests/test_get_transcript.py +25 -2
  8. {earningscall-1.2.0 → earningscall-1.2.1}/.github/workflows/release.yml +0 -0
  9. {earningscall-1.2.0 → earningscall-1.2.1}/.github/workflows/test.yml +0 -0
  10. {earningscall-1.2.0 → earningscall-1.2.1}/.gitignore +0 -0
  11. {earningscall-1.2.0 → earningscall-1.2.1}/.python-version +0 -0
  12. {earningscall-1.2.0 → earningscall-1.2.1}/DEVELOPMENT.md +0 -0
  13. {earningscall-1.2.0 → earningscall-1.2.1}/LICENSE +0 -0
  14. {earningscall-1.2.0 → earningscall-1.2.1}/TODO.md +0 -0
  15. {earningscall-1.2.0 → earningscall-1.2.1}/earningscall/__init__.py +0 -0
  16. {earningscall-1.2.0 → earningscall-1.2.1}/earningscall/api.py +0 -0
  17. {earningscall-1.2.0 → earningscall-1.2.1}/earningscall/calendar.py +0 -0
  18. {earningscall-1.2.0 → earningscall-1.2.1}/earningscall/errors.py +0 -0
  19. {earningscall-1.2.0 → earningscall-1.2.1}/earningscall/event.py +0 -0
  20. {earningscall-1.2.0 → earningscall-1.2.1}/earningscall/exports.py +0 -0
  21. {earningscall-1.2.0 → earningscall-1.2.1}/earningscall/sectors.py +0 -0
  22. {earningscall-1.2.0 → earningscall-1.2.1}/earningscall/symbols.py +0 -0
  23. {earningscall-1.2.0 → earningscall-1.2.1}/earningscall/transcript.py +0 -0
  24. {earningscall-1.2.0 → earningscall-1.2.1}/earningscall/utils.py +0 -0
  25. {earningscall-1.2.0 → earningscall-1.2.1}/hatch.toml +0 -0
  26. {earningscall-1.2.0 → earningscall-1.2.1}/scripts/download_audio_files.py +0 -0
  27. {earningscall-1.2.0 → earningscall-1.2.1}/scripts/download_single_audio_file.py +0 -0
  28. {earningscall-1.2.0 → earningscall-1.2.1}/scripts/download_sp500_audio_files.py +0 -0
  29. {earningscall-1.2.0 → earningscall-1.2.1}/scripts/get_all_company_transcripts.py +0 -0
  30. {earningscall-1.2.0 → earningscall-1.2.1}/scripts/get_all_sp500_transcript_texts.py +0 -0
  31. {earningscall-1.2.0 → earningscall-1.2.1}/scripts/get_calendar.py +0 -0
  32. {earningscall-1.2.0 → earningscall-1.2.1}/scripts/get_single_transcript.py +0 -0
  33. {earningscall-1.2.0 → earningscall-1.2.1}/scripts/list_companies.py +0 -0
  34. {earningscall-1.2.0 → earningscall-1.2.1}/setup.cfg +0 -0
  35. {earningscall-1.2.0 → earningscall-1.2.1}/tests/data/aapl-q1-2022-advanced-data-level-2.yaml +0 -0
  36. {earningscall-1.2.0 → earningscall-1.2.1}/tests/data/aapl-q1-2022-advanced-data-level-3.yaml +0 -0
  37. {earningscall-1.2.0 → earningscall-1.2.1}/tests/data/aapl-q1-2022-advanced-data-level-4.yaml +0 -0
  38. {earningscall-1.2.0 → earningscall-1.2.1}/tests/data/aapl-q1-2022-speaker-name-map-v2.yaml +0 -0
  39. {earningscall-1.2.0 → earningscall-1.2.1}/tests/data/aapl-q1-2030-not-authorized-l2.yaml +0 -0
  40. {earningscall-1.2.0 → earningscall-1.2.1}/tests/data/aapl-q1-2030-not-authorized.yaml +0 -0
  41. {earningscall-1.2.0 → earningscall-1.2.1}/tests/data/aapl-q1-2030-not-found.yaml +0 -0
  42. {earningscall-1.2.0 → earningscall-1.2.1}/tests/data/aapl-q1-2030-server-error.yaml +0 -0
  43. {earningscall-1.2.0 → earningscall-1.2.1}/tests/data/demo-symbols-v2-alpha.yaml +0 -0
  44. {earningscall-1.2.0 → earningscall-1.2.1}/tests/data/demo-symbols-v2.yaml +0 -0
  45. {earningscall-1.2.0 → earningscall-1.2.1}/tests/data/get-calendar-500-error.yaml +0 -0
  46. {earningscall-1.2.0 → earningscall-1.2.1}/tests/data/get-calendar-not-found-response.yaml +0 -0
  47. {earningscall-1.2.0 → earningscall-1.2.1}/tests/data/get-calendar-successful-response.yaml +0 -0
  48. {earningscall-1.2.0 → earningscall-1.2.1}/tests/data/meta-q3-2024-not-authorized.yaml +0 -0
  49. {earningscall-1.2.0 → earningscall-1.2.1}/tests/data/meta-q3-2024-not-found.yaml +0 -0
  50. {earningscall-1.2.0 → earningscall-1.2.1}/tests/data/meta-q3-2024-other-error.yaml +0 -0
  51. {earningscall-1.2.0 → earningscall-1.2.1}/tests/data/msft-company-events.yaml +0 -0
  52. {earningscall-1.2.0 → earningscall-1.2.1}/tests/data/msft-q1-2022-audio-file-short-clip.yaml +0 -0
  53. {earningscall-1.2.0 → earningscall-1.2.1}/tests/data/msft-transcript-response.yaml +0 -0
  54. {earningscall-1.2.0 → earningscall-1.2.1}/tests/data/sp500-company-list-failed.yaml +0 -0
  55. {earningscall-1.2.0 → earningscall-1.2.1}/tests/data/sp500-company-list.yaml +0 -0
  56. {earningscall-1.2.0 → earningscall-1.2.1}/tests/data/symbols-v2-missing-edge-cases.yaml +0 -0
  57. {earningscall-1.2.0 → earningscall-1.2.1}/tests/data/symbols-v2.yaml +0 -0
  58. {earningscall-1.2.0 → earningscall-1.2.1}/tests/data/symbols.txt +0 -0
  59. {earningscall-1.2.0 → earningscall-1.2.1}/tests/data/symbols.yaml +0 -0
  60. {earningscall-1.2.0 → earningscall-1.2.1}/tests/test_api.py +0 -0
  61. {earningscall-1.2.0 → earningscall-1.2.1}/tests/test_company.py +0 -0
  62. {earningscall-1.2.0 → earningscall-1.2.1}/tests/test_download_audio_files.py +0 -0
  63. {earningscall-1.2.0 → earningscall-1.2.1}/tests/test_earnings_event.py +0 -0
  64. {earningscall-1.2.0 → earningscall-1.2.1}/tests/test_errors.py +0 -0
  65. {earningscall-1.2.0 → earningscall-1.2.1}/tests/test_exports.py +0 -0
  66. {earningscall-1.2.0 → earningscall-1.2.1}/tests/test_get_calendar.py +0 -0
  67. {earningscall-1.2.0 → earningscall-1.2.1}/tests/test_get_company_events.py +0 -0
  68. {earningscall-1.2.0 → earningscall-1.2.1}/tests/test_get_sp500_companies_api.py +0 -0
  69. {earningscall-1.2.0 → earningscall-1.2.1}/tests/test_helper.py +0 -0
  70. {earningscall-1.2.0 → earningscall-1.2.1}/tests/test_responses_mocking.py +0 -0
  71. {earningscall-1.2.0 → earningscall-1.2.1}/tests/test_sectors.py +0 -0
  72. {earningscall-1.2.0 → earningscall-1.2.1}/tests/test_symbols.py +0 -0
  73. {earningscall-1.2.0 → earningscall-1.2.1}/tests/test_utils.py +0 -0
{earningscall-1.2.0 → earningscall-1.2.1}/CHANGELOG.md
@@ -1,5 +1,9 @@
  # Changelog

+ ## Release `1.2.1` - 2025-04-04
+
+ * Bugfix: handle case where Q&A section is missing from the transcript for level 4 transcription.
+
  ## Release `1.2.0` - 2025-02-12

  * Add `get_calendar` function to get the calendar for a given date.
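
For context, the `1.2.1` fix is exercised when a level-4 transcript contains prepared remarks but no Q&A section. A minimal sketch of how a caller would hit the fixed path, assuming the `get_company`/`get_transcript` API documented in the project README (the symbol and period here mirror the new NVDA test fixture):

```python
from earningscall import get_company

# Hedged sketch: fetch a level-4 transcript whose Q&A section is
# missing, as in the nvda-q2-2025-level-4-data-missing.yaml fixture.
# Note: non-demo symbols like "nvda" require an API key.
company = get_company("nvda")
transcript = company.get_transcript(year=2025, quarter=2, level=4)

# With the 1.2.1 fix, transcript.text falls back to the prepared
# remarks alone instead of failing on the missing Q&A section.
print(transcript.text[:120])
```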
{earningscall-1.2.0 → earningscall-1.2.1}/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: earningscall
- Version: 1.2.0
+ Version: 1.2.1
  Summary: The EarningsCall Python library provides convenient access to the EarningsCall API. It includes a pre-defined set of classes for API resources that initialize themselves dynamically from API responses.
  Project-URL: Homepage, https://earningscall.biz
  Project-URL: Documentation, https://github.com/EarningsCall/earningscall-python
@@ -273,6 +273,20 @@ for event in calendar:
      print(f"{event.company_name} - Q{event.quarter} {event.year} on: {event.conference_date.astimezone().isoformat()} Transcript Ready: {event.transcript_ready}")
  ```

+ Output
+
+ ```text
+ Tilray Brands, Inc. - Q2 2025 on: 2025-01-10T07:30:00-06:00 Transcript Ready: True
+ Walgreens Boots Alliance, Inc. - Q1 2025 on: 2025-01-10T07:30:00-06:00 Transcript Ready: True
+ Neogen Corporation - Q2 2025 on: 2025-01-10T07:30:00-06:00 Transcript Ready: True
+ E2open Parent Holdings, Inc. - Q3 2025 on: 2025-01-10T07:30:00-06:00 Transcript Ready: True
+ TD SYNNEX Corporation - Q4 2024 on: 2025-01-10T08:00:00-06:00 Transcript Ready: True
+ Delta Air Lines, Inc. - Q4 2024 on: 2025-01-10T09:00:00-06:00 Transcript Ready: True
+ Constellation Brands, Inc. - Q3 2025 on: 2025-01-10T09:30:00-06:00 Transcript Ready: True
+ PriceSmart, Inc. - Q1 2025 on: 2025-01-10T11:00:00-06:00 Transcript Ready: True
+ KORU Medical Systems, Inc. - Q4 2024 on: 2025-01-10T15:30:00-06:00 Transcript Ready: True
+ WD-40 Company - Q1 2025 on: 2025-01-10T16:00:00-06:00 Transcript Ready: True
+ ```

  ## List All Companies

{earningscall-1.2.0 → earningscall-1.2.1}/README.md
@@ -221,6 +221,20 @@ for event in calendar:
      print(f"{event.company_name} - Q{event.quarter} {event.year} on: {event.conference_date.astimezone().isoformat()} Transcript Ready: {event.transcript_ready}")
  ```

+ Output
+
+ ```text
+ Tilray Brands, Inc. - Q2 2025 on: 2025-01-10T07:30:00-06:00 Transcript Ready: True
+ Walgreens Boots Alliance, Inc. - Q1 2025 on: 2025-01-10T07:30:00-06:00 Transcript Ready: True
+ Neogen Corporation - Q2 2025 on: 2025-01-10T07:30:00-06:00 Transcript Ready: True
+ E2open Parent Holdings, Inc. - Q3 2025 on: 2025-01-10T07:30:00-06:00 Transcript Ready: True
+ TD SYNNEX Corporation - Q4 2024 on: 2025-01-10T08:00:00-06:00 Transcript Ready: True
+ Delta Air Lines, Inc. - Q4 2024 on: 2025-01-10T09:00:00-06:00 Transcript Ready: True
+ Constellation Brands, Inc. - Q3 2025 on: 2025-01-10T09:30:00-06:00 Transcript Ready: True
+ PriceSmart, Inc. - Q1 2025 on: 2025-01-10T11:00:00-06:00 Transcript Ready: True
+ KORU Medical Systems, Inc. - Q4 2024 on: 2025-01-10T15:30:00-06:00 Transcript Ready: True
+ WD-40 Company - Q1 2025 on: 2025-01-10T16:00:00-06:00 Transcript Ready: True
+ ```

  ## List All Companies

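The output above comes from the `get_calendar` example that precedes it in the README; a hedged reconstruction of the call, with the date argument form assumed from the 1.2.0 changelog entry ("get the calendar for a given date"):

```python
from datetime import date

from earningscall import get_calendar

# Assumed argument form: the earnings calendar for a single day.
calendar = get_calendar(date(2025, 1, 10))
for event in calendar:
    print(f"{event.company_name} - Q{event.quarter} {event.year} on: {event.conference_date.astimezone().isoformat()} Transcript Ready: {event.transcript_ready}")
```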
{earningscall-1.2.0 → earningscall-1.2.1}/earningscall/company.py
@@ -93,7 +93,10 @@ class Company:
          if 2 <= level <= 3:
              transcript.text = " ".join(map(lambda spk: spk.text, transcript.speakers))
          elif level == 4:
-             transcript.text = " ".join([transcript.prepared_remarks, transcript.questions_and_answers])
+             if transcript.questions_and_answers:
+                 transcript.text = " ".join([transcript.prepared_remarks, transcript.questions_and_answers])
+             else:
+                 transcript.text = transcript.prepared_remarks
          if transcript.speaker_name_map_v2:
              for speaker in transcript.speakers:
                  speaker.speaker_info = transcript.speaker_name_map_v2.get(speaker.speaker)
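
The new guard matters because `str.join` raises `TypeError` when the sequence contains `None`. A standalone illustration of the failure mode and the fallback (plain Python, not library code):

```python
prepared_remarks = "Good afternoon, everyone, and welcome..."
questions_and_answers = None  # Q&A section missing from the API response

try:
    # Pre-1.2.1 path: joining a None element raises TypeError.
    text = " ".join([prepared_remarks, questions_and_answers])
except TypeError as exc:
    print(f"old path fails: {exc}")

# Post-1.2.1 path: fall back to the prepared remarks alone.
text = (
    " ".join([prepared_remarks, questions_and_answers])
    if questions_and_answers
    else prepared_remarks
)
print(text)
```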
{earningscall-1.2.0 → earningscall-1.2.1}/pyproject.toml
@@ -1,6 +1,6 @@
  [project]
  name = "earningscall"
- version = "1.2.0"
+ version = "1.2.1"
  description = "The EarningsCall Python library provides convenient access to the EarningsCall API. It includes a pre-defined set of classes for API resources that initialize themselves dynamically from API responses."
  readme = "README.md"
  authors = [{ name = "EarningsCall", email = "dev@earningscall.biz" }]
@@ -0,0 +1,679 @@
1
+ responses:
2
+ - response:
3
+ auto_calculate_content_length: false
4
+ body: '{"event": {"year": 2025, "quarter": 2, "conference_date": "2024-08-28T17:00:00.000-04:00"},
5
+ "prepared_remarks": "Good afternoon, everyone, and welcome to NVIDIA''s conference
6
+ call for the second quarter of fiscal 2025. With me today from NVIDIA are Jensen
7
+ Wong, President and Chief Executive Officer, and Colette Kress, Executive Vice
8
+ President and Chief Financial Officer. I would like to remind you that our call
9
+ is being webcast live on NVIDIA''s Investor Relations website. The webcast will
10
+ be available for replay until the conference call to discuss our financial results
11
+ for the third quarter of fiscal 2025. The content of today''s call is NVIDIA''s
12
+ property. It cannot be reproduced or transcribed without prior written consent.
13
+ During this call, we may make forward-looking statements based on current expectation.
14
+ These are subject to a number of risks, significant risks and uncertainties,
15
+ and our actual results may differ materially. For discussion of factors that
16
+ could affect our future financial results and business, please refer to the
17
+ disclosure in today''s earnings release, our most recent Forms 10-K and 10-Q,
18
+ and the reports that we may file on Form 8-K with the Securities and Exchange
19
+ Commission. All our statements are made as of today, August 28, 2024, based
20
+ on information currently available to us. Except as required by law, we assume
21
+ no obligation to update any such statements. During this call, we will discuss
22
+ non-GAAP financial measures. You can find a reconciliation of these non-GAAP
23
+ financial measures to GAAP financial measures in our CFO commentary, which is
24
+ posted on our website. Let me highlight an upcoming event for the financial
25
+ community. We will be attending the Goldman Sachs Communicopia and Technology
26
+ Conference on September 11th in San Francisco, where Jensen will participate
27
+ in a keynote fireside chat. Our earnings call to discuss the results of our
28
+ third quarter of fiscal 2025 is scheduled for Wednesday, November 20th, 2024.
29
+ With that, let me turn the call over to Collette. Thanks, Stuart. Q2 was another
30
+ record quarter. Revenue of $30 billion was up 15% sequentially and up 122% year-on-year,
31
+ and well above our outlook of $28 billion. Starting with data center. Data center
32
+ revenue of $26.3 billion was a record, up 16% sequentially and up 154% year-on-year,
33
+ driven by strong demand for NVIDIA Hopper GPU computing and our networking platforms.
34
+ Compute revenue grew more than 2.5x. Networking revenue grew more than 2x from
35
+ the last year. Cloud service providers represented roughly 45% of our data center
36
+ revenue, and more than 50% stem from the consumer, internet, and enterprise
37
+ companies. Customers continue to accelerate their Hopper architecture purchases
38
+ while gearing up to adopt Blackwell. Key workloads driving our data center growth
39
+ include generative AI, model training and inferencing, video, image, and text
40
+ data, pre- and post-processing with CUDA and AI workloads, synthetic data generation,
41
+ AI-powered recommender systems, SQL and vector database processing as well.
42
+ Next generation models will require 10 to 20 times more compute to train with
43
+ significantly more data. The trend is expected to continue. Over the trailing
44
+ four quarters, we estimate that inference drove more than 40% of our data center
45
+ revenue. CSPs, consumer internet companies, and enterprises benefit from the
46
+ incredible throughput and efficiency of NVIDIA''s inference platform. Demand
47
+ for NVIDIA is coming from frontier model makers, consumer internet services,
48
+ and tens of thousands of companies and startups building generative AI applications
49
+ for consumers, advertising, education, enterprise and healthcare, and robotics.
50
+ desire NVIDIA''s rich ecosystem and availability in every cloud. CSPs appreciate
51
+ the broad adoption of NVIDIA and are growing their NVIDIA capacity given the
52
+ high demand. NVIDIA H200 platform began ramping in Q2, shipping to large CSPs,
53
+ consumer internet and enterprise companies. The NVIDIA H200 builds upon the
54
+ strength of our Hopper architecture and offering over 40% more memory bandwidth
55
+ compared to the H100. Our data center revenue in China grew sequentially in
56
+ Q2 and is a significant contributor to our data center revenue. As a percentage
57
+ of total data center revenue, it remains below levels seen prior to the imposition
58
+ of export controls. We continue to expect the China market to be very competitive
59
+ going forward. The latest round of MLPerf inference benchmarks highlighted NVIDIA''s
60
+ inference leadership with both NVIDIA, Hopper, and Blackwell platforms combining
61
+ to win gold medals on all tasks. At Computex, NVIDIA with the top computer manufacturers
62
+ unveiled an array of Blackwell architecture-powered systems and NVIDIA networking
63
+ for building AI factories and data centers. With the NVIDIA MGX modular reference
64
+ architecture, our OEMs and ODM partners are building more than 100 Blackwell-based
65
+ systems designed quickly and cost-effectively. The NVIDIA Blackwell platform
66
+ brings together multiple GPU, CPU, DPU, NVLink, NVLink Switch, and the networking
67
+ chips, systems, and NVIDIA CUDA software to power the next generation of AI
68
+ across the cases, industries, and countries. The NVIDIA GB200 NVL72 system with
69
+ the fifth generation NVLink enables all 72 GPUs to act as a single GPU and deliver
70
+ up to 30 times faster inference for LLMs, workloads, and unlocking the ability
71
+ to run trillion parameter models in real time. Copper demand is strong and Blackwell
72
+ is widely sampling. We executed a change to the Blackwell GPU mass to improve
73
+ production yields. Blackwell production ramp is scheduled to begin in the fourth
74
+ quarter and continue into fiscal year 26. In Q4, we expect to ship several billion
75
+ dollars in Blackwell revenue. Hopper shipments are expected to increase in the
76
+ second half of fiscal 2025. Hopper supply and availability have improved. Demand
77
+ for Blackwell platforms is well above supply, and we expect this to continue
78
+ into next year. Networking revenue increased 16% sequentially. Our Ethernet
79
+ for AI revenue, which includes our SpectrumX end-to-end Ethernet platform, doubled
80
+ sequentially with hundreds of customers adopting our Ethernet offerings. SpectrumX
81
+ has broad market support from OEM and ODM partners and is being adopted by CSPs,
82
+ GPU cloud providers, and enterprise, including XAI, to connect the largest GPU
83
+ compute cluster in the world. SpectrumX supercharges Ethernet for AI processing
84
+ and delivers 1.6x the performance of traditional Ethernet. We plan to launch
85
+ new SpectrumX products every year to support demand for scaling compute clusters
86
+ from tens of thousands of DPUs today to millions of DPUs in the near future.
87
+ SpectrumX is well on track to begin a multi-billion dollar product line within
88
+ a year. Our sovereign AI opportunities continue to expand as countries recognize
89
+ AI expertise and infrastructure at national imperatives for their society and
90
+ industries. Japan''s National Institute of Advanced Industrial Science and Technology
91
+ is building its AI bridging cloud infrastructure 3.0 supercomputer with NVIDIA.
92
+ We believe sovereign AI revenue will reach low double-digit billions this year.
93
+ The enterprise AI wave has started. Enterprises also drove sequential revenue
94
+ growth in the quarter. We are working with most of the Fortune 100 companies
95
+ on AI initiatives across industries and geographies. A range of applications
96
+ are fueling our growth, including AI-powered chatbots, generative AI co-pilots,
97
+ and agents to build new monetizable business applications and enhance employee
98
+ productivity. Amdocs is using NVIDIA generative AI for their smart agent, transforming
99
+ the customer experience and reducing customer service costs by 30%. ServiceNow
100
+ is using NVIDIA for its Now Assist offering, the fastest growing new product
101
+ in the company''s history. SAP is using NVIDIA to build dual co-pilots. Cohesity
102
+ is using NVIDIA to build their generative AI agent and lower generative AI development
103
+ costs. Snowflake, who serves over 3 billion queries a day for over 10,000 enterprise
104
+ customers, is working with NVIDIA to build co-pilots. And lastly, Wistron is
105
+ using NVIDIA AI Omniverse to reduce end-to-end cycle times for their factories
106
+ by 50%. Automotive was a key growth driver for the quarter, as every automaker
107
+ developing autonomous vehicle technology is using NVIDIA in their data centers.
108
+ Automotive will drive multi-billion dollars in revenue across on-prem and cloud
109
+ consumption and will grow as next generation AV models require significantly
110
+ more compute. Healthcare is also on its way to being a multi-billion dollar
111
+ business as AI revolutionizes medical imaging, surgical robots, patient care,
112
+ electronic health, record processing, and drug discovery. During the quarter,
113
+ we announced a new NVIDIA AI Foundry service to supercharge generative AI for
114
+ the world''s enterprises with Meta''s LAMA 3.1 collection of models. This marks
115
+ a watershed moment for enterprise AI. Companies for the first time can leverage
116
+ the capabilities of an open source frontier level model to develop customized
117
+ AI applications to encode their institutional knowledge into an AI flywheel
118
+ to automate and accelerate their business. Accenture is the first to adopt the
119
+ new service to build custom LAMA 3.1 models for both its own use and to assist
120
+ clients seeking to deploy generative AI applications. NVIDIA NIMS accelerate
121
+ and simplify model deployment. Companies across healthcare, energy, financial
122
+ services, retail, transportation, and telecommunications are adopting NIMS,
123
+ including Aramco, Lowe''s, and Uber. AT&T realized 70% cost savings and eight
124
+ times latency reduction after moving into NIMS for generative AI, call transcription,
125
+ and classification. Over 150 partners are embedding NIMS across every layer
126
+ of the AI ecosystem. We announced NIM agent blueprints, a catalog of customizable
127
+ reference applications that include a full suite of software for building and
128
+ deploying enterprise generative AI applications. With NIM agent blueprints,
129
+ enterprises can refine their AI applications over time, creating a data-driven
130
+ AI flywheel. The first NIM agent blueprints include workloads for customer service,
131
+ computer-aided drug discovery, and enterprise retrieval augmented generation.
132
+ Our system integrators, technology solution providers, and system builders are
133
+ bringing NVIDIA NIM agent blueprints to enterprises. NVIDIA NIM and NIM agent
134
+ blueprints are available through the NVIDIA AI enterprise software platform,
135
+ which has great momentum. We expect our software SaaS, and support revenue to
136
+ approach a $2 billion annual run rate exiting this year, with NVIDIA AI Enterprise
137
+ notably contributing to growth. Moving to gaming and AI PCs. Gaming revenue
138
+ of $2.88 billion increased 9% sequentially and 16% year on year. We saw sequential
139
+ growth in console, notebook, and desktop revenue, and demand is strong and growing,
140
+ and channel inventory remains healthy. Every PC with RTX is an AI PC. RTX PCs
141
+ can deliver up to 1,300 AI tops, and there are now over 200 RTX AI laptops designed
142
+ from leading PC manufacturers. With 600 AI-powered applications and games and
143
+ an installed base of 100 million devices, RTX is set to revolutionize consumer
144
+ experiences with generative AI. NVIDIA ACE, a suite of generative AI technologies,
145
+ is available for RTX AI PCs. Mega Break is the first game to use NVIDIA ACE,
146
+ including our small language model, Minitron 4B, optimized on device inference.
147
+ The NVIDIA gaming ecosystem continues to grow. Recently added RTX and DLSS titles,
148
+ including Indiana Jones and The Great Circle, Dune Awakening, and Dragon Age,
149
+ The Veil Guard. The GeForce Now library continues to expand with total catalog
150
+ size of over 2,000 titles, the most content of any cloud gaming service. moving
151
+ to pro-visualization. Revenue of $454 million was up 6% sequentially and 20%
152
+ year-on-year. Demand is being driven by AI and graphic use cases, including
153
+ model fine tuning and Omniverse-related workloads. Automotive and manufacturing
154
+ were among the key industry verticals driving growth this quarter. Companies
155
+ are racing to digitalize workflows to drive efficiency across their operations.
156
+ The world''s largest electronics manufacturer, Foxconn, is using NVIDIA Omniverse
157
+ to power digital twins of the physical plants that produce NVIDIA black hole
158
+ systems. And several large global enterprises, including Mercedes-Benz, signed
159
+ multi-year contracts for NVIDIA Omniverse Cloud to build industrial digital
160
+ twins of factories. We announced new NVIDIA USD NIMs and connectors to open
161
+ Omniverse to new industries and enable developers to incorporate generative
162
+ AI co-pilots and agents into USD workloads, accelerating their ability to build
163
+ highly accurate virtual worlds. WPP is implementing USD NIM microservices in
164
+ its generative AI-enabled content creation pipeline for customers. such as the
165
+ Coca-Cola Company. Moving to automotive and robotics, revenue was $346 million,
166
+ up 5% sequentially and up 37% year-on-year. Year-on-year growth was driven by
167
+ the new customer ramps in self-driving platforms and increased demand for AI
168
+ cockpit solutions. At the Computer Vision and Pattern Recognition Conference,
169
+ NVIDIA won the Autonomous Grand Challenge in the end-to-end driving at scale
170
+ category, outperforming more than 400 entries worldwide. Austin Dynamics, BYD
171
+ Electronics, Figure Intrinsic, Siemens, Skills 8i, and Teradyne Robotics are
172
+ using the NVIDIA ISAAC Robotics platform for autonomous robot arms, humanoids,
173
+ and mobile robots. Now, moving to the rest of the P&L. Gap gross margins were
174
+ 75.1%, and non-gap gross margins were 75.7%. Down sequentially due to a higher
175
+ mix of new products within data center and inventory provisions for low-yielding
176
+ blackwell material. Sequentially, gap and non-gap operating expenses were up
177
+ 12%, primarily reflecting higher compensation-related costs. Cash flow from
178
+ operations was $14.5 billion. In Q2, we utilized cash of $7.4 billion towards
179
+ shareholder returns in the form of share repurchases and cash dividends, reflecting
180
+ the increase in dividend per share. Our board of directors recently approved
181
+ a $50 billion share repurchase authorization to add to our remaining $7.5 billion
182
+ of authorization at the end of Q2. Let me turn the outlook for the third quarter.
183
+ Total revenue is expected to be 32.5 billion plus or minus 2%. Our third quarter
184
+ revenue outlook incorporates continued growth of our hopper architecture and
185
+ sampling of our Blackwell products. We expect Blackwell production ramp in Q4.
186
+ Gap and non-gap gross margins are expected to be 74.4% and 75% respectively.
187
+ plus or minus 50 basis points. As our data center mix continues the shift to
188
+ new products, we expect this trend to continue into the fourth quarter fiscal
189
+ 2025. For the full year, we expect gross margins to be in the mid-70% range.
190
+ GAAP and non-GAAP operating expenses are expected to be approximately 4.3 billion
191
+ and 3.0 billion, respectively. Full-year operating expenses are expected to
192
+ grow in the mid to upper 40% range as we work on developing our next generation
193
+ of products. GAAP and non-GAAP other income and expenses are expected to be
194
+ about $350 million, including gains and losses from non-affiliated investments
195
+ and publicly held equity securities. GAAP and non-GAAP tax rates are expected
196
+ to be 17%, plus or minus 1%, excluding any discrete items. Further financial
197
+ details are included in the CFO commentary and other information available on
198
+ our IR website. We are now going to open the call for questions. Operator, would
199
+ you please help us and poll for questions? Thank you. And at this time, I would
200
+ like to remind everyone in order to ask a question, press star and then the
201
+ number one on your telephone keypad. We will pause for just a moment to compile
202
+ the Q&A roster. And as a reminder, we ask that you please limit yourself to
203
+ one question. And your first question comes from the line of Vivek Arya with
204
+ Bank of America Securities. Your line is open. Thanks for taking my question.
205
+ Jensen, you mentioned in the prepared comments that there''s a change in the
206
+ Blackwell GPU mask I''m curious, are there any other incremental changes in
207
+ back-end packaging or anything else? And I think related, you suggested that
208
+ you could ship several billion dollars of Blackwell in Q4 despite a change in
209
+ the design. Is it because all these issues will be solved by then? Just help
210
+ us size what is the overall impact of any changes in Blackwell timing, what
211
+ that means to your kind of revenue profile, and how are customers reacting to
212
+ it? Yeah, thanks, Vivek. The change to the mask is complete. There were no functional
213
+ changes necessary. And so we''re sampling functional samples of Blackwell, Grace
214
+ Blackwell, in a variety of system configurations as we speak. There are something
215
+ like 100 different types of Blackwell-based systems that are built that were
216
+ shown at Computext. and we''re enabling our ecosystem to start sampling those.
217
+ The functionality of Blackwell is as it is, and we expect to start production
218
+ in Q4. And your next question comes from the line of Toshi Ahari with Goldman
219
+ Sachs. Your line is open. Hi, thank you so much for taking the question. Jensen,
220
+ I had a relatively longer-term question. As you may know, there''s a pretty
221
+ heated debate in the market on your customers and customers'' customers'' return
222
+ on investment and what that means for the sustainability of CapEx going forward.
223
+ Internally at NVIDIA, what are you guys watching? What''s on your dashboard
224
+ as you try to gauge customer return and and how that impacts CapEx. And then
225
+ a quick follow-up maybe for Colette. I think your sovereign AI number for the
226
+ full year went up maybe a couple billion. What''s driving the improved outlook,
227
+ and how should we think about fiscal 26? Thank you. Thanks, Satoshi. First of
228
+ all, when I said ship production in Q4, I mean shipping out. I don''t mean starting
229
+ to ship, but I mean, I don''t mean starting production, but shipping out. On
230
+ the longer-term question, let''s take a step back And you''ve heard me say that
231
+ we''re going through two simultaneous platform transitions at the same time.
232
+ The first one is transitioning from accelerated computing to, from general purpose
233
+ computing to accelerated computing. And the reason for that is because CPU scaling
234
+ has been known to be slowing for some time. And it is slow to a crawl. And yet
235
+ the amount of computing demand continues to grow quite significantly. You could
236
+ maybe even estimate it to be doubling every single year. And so if we don''t
237
+ have a new approach, computing inflation would be driving up the cost for every
238
+ company, and it would be driving up the energy consumption of data centers around
239
+ the world. In fact, you''re seeing that. And so the answer is accelerated computing.
240
+ We know that accelerated computing, of course, speeds up applications. It also
241
+ enables you to do computing at a much larger scale, for example, scientific
242
+ simulations or database processing. But what that translates directly to is
243
+ lower cost and lower energy consumed. And in fact, this week, there''s a blog
244
+ that came out that talked about a whole bunch of new libraries that we offer.
245
+ And that''s really the core of the first platform transition, going from general
246
+ purpose computing to accelerated computing. And it''s not unusual to see someone
247
+ save 90% of their computing cost. And the reason for that is, of course, you
248
+ just sped up an application 50x. You would expect the computing cost to decline
249
+ quite significantly. The second was enabled by accelerated computing because
250
+ we drove down the cost of training large language models or training deep learning
251
+ so incredibly that it is now possible to have gigantic scale models, multi-trillion
252
+ parameter models, and pre-train it on just about the world''s knowledge corpus
253
+ and let the model go figure out how to understand human language representation
254
+ and how to codify knowledge into its neural networks and how to learn reasoning.
255
+ And so which caused the generative AI revolution? Now, generative AI, taking
256
+ a step back about why it is that we went so deeply into it, is because it''s
257
+ not just a feature. It''s not just a capability. It''s a fundamental new way
258
+ of doing software. Instead of human engineered algorithms, we now have data.
259
+ We tell the AI, we tell the model, we tell the computer, what are the expected
260
+ answers? What are our previous observations? And then for it to figure out what
261
+ the algorithm is, what''s the function, it learns a universal, you know, AI
262
+ is a bit of a universal function approximator, and it learns the function. And
263
+ so you could learn the function of almost anything. And anything that you have
264
+ that''s predictable, anything that has structure, anything that you have previous
265
+ examples of. And so now here we are with generative AI. It''s a fundamental
266
+ new form of computer science. It''s affecting how every layer of computing is
267
+ done from CPU to GPU, from human-engineered algorithms to machine-learned algorithms,
268
+ and the type of applications you could now develop and produce is fundamentally
269
+ remarkable. And there are several things that are happening in generative AI.
270
+ So the first thing that''s happening is the frontier models are growing in quite
271
+ substantial scale. And they''re still seeing, we''re still all seeing the benefits
272
+ of scaling. And whenever you double the size of a model, you also have to more
273
+ than double the size of the data set to go train it. And so the amount of flops
274
+ necessary in order to create that model goes up quadratically. And so it''s
275
+ not unexpected to see that the next generation models could take 20, 10, 20,
276
+ 40 times more compute than last generation. So we have to continue to drive
277
+ the generational performance up quite significantly so we can drive down the
278
+ energy consumed and drive down the cost necessary to do it. So the first one
279
+ is, There are larger frontier models trained on more modalities, and surprisingly,
280
+ there are more frontier model makers than last year. And so you have more on
281
+ more on more. That''s one of the dynamics going on in generative AI. The second
282
+ is, although it''s below the tip of the iceberg, what we see are chat GPT, image
283
+ generators, We see coding. We use generative AI for coding quite extensively
284
+ here at NVIDIA now. We, of course, have a lot of digital designers and things
285
+ like that. But those are kind of the tip of the iceberg. What''s below the iceberg
286
+ are the largest systems, largest computing systems in the world today, which
287
+ are, and you''ve heard me talk about this in the past, which are recommender
288
+ systems moving from CPUs. It''s now moving from CPUs to generative AI. So recommender
289
+ systems, ad generation, custom ad generation, targeting ads at very large scale
290
+ and quite hyper-targeting, search, and user-generated content. These are all
291
+ very large-scale applications have now evolved to generative AI. Of course,
292
+ the number of generative AI startups is generating tens of billions of dollars
293
+ of cloud renting opportunities for our cloud partners. And sovereign AI, countries
294
+ that are now realizing that their data is their natural and national resource,
295
+ and they have to use AI, build their own AI infrastructure so that they could
296
+ have their own digital intelligence. Enterprise AI, as Colette mentioned earlier,
297
+ is starting. And you might have seen our announcement that the world''s leading
298
+ IT company companies are joining us to take the NVIDIA AI Enterprise platform
299
+ to the world''s enterprises. The companies that we''re talking to, so many of
300
+ them are just so incredibly excited to drive more productivity out of their
301
+ company. And then general robotics, the big transformation last year as we are
302
+ able to now learn physical AI from watching video and human demonstration and
303
+ synthetic data generation from reinforcement learning from systems like Omniverse.
304
+ We are now able to work with just about every robotics companies now to start
305
+ thinking about start building general robotics. And so you can see that there
306
+ are just so many different directions that generative AI is going. And so we''re
307
+ actually seeing the momentum of generative AI accelerating. And Tashia, to answer
308
+ your question regarding sovereign AI and our goals in terms of growth, in terms
309
+ of revenue, it certainly is a unique and growing opportunity, something that
310
+ surfaced with generative AI and the desires of countries around the world to
311
+ have their own that would be able to incorporate their own language, incorporate
312
+ their own culture, incorporate their own data in that country. So more and more
313
+ excitement around these models and what they can be specific for those countries.
314
+ So yes, we are seeing some growth opportunity in front of us. And your next
315
+ question comes from the line of Joe Moore with Morgan Stanley. Your line is
316
+ open. Great, thank you. Vincent, in the press release, you talked about Blackwell
317
+ anticipation being incredible, but it seems like Hopper demand is also really
318
+ strong. I mean, you''re guiding for a very strong quarter without Blackwell
319
+ in October. So, you know, how long do you see sort of coexisting strong demand
320
+ for both? And can you talk about the transition to Blackwell? Do you see people
321
+ intermixing clusters? Do you think most of the Blackwell activities, new clusters,
322
+ just some sense of what that transition looks like? Yeah, thanks, Joe. The demand
323
+ for Hopper is really strong. And it''s true, the demand for Blackwell is incredible.
324
+ There''s a couple of reasons for that. The first reason is if you just look
325
+ at the world''s cloud service providers and the amount of GPU capacity they
326
+ have available, it''s basically none. And the reason for that is because they''re
327
+ either being deployed internally for accelerating their own workloads, data
328
+ processing, for example. Data processing, you know, we hardly ever talk about
329
+ it because it''s mundane. You know, it''s not very cool because it doesn''t
330
+ generate a picture or, you know, generate words. But almost every single company
331
+ in the world processes data in the background. And NVIDIA''s GPUs are the only
332
+ accelerators on the planet that process and accelerate data. SQL data, Pandas
333
+ data, data science, toolkits like Pandas and the new one, Polars. These are
334
+ the most popular data processing platforms in the world. And aside from CPUs,
335
+ which, as I''ve mentioned before, really running out of steam, NVIDIA''s accelerated
336
+ computing is really the only way to get boosting performance out of that. And
337
+ so that''s number one, is the primary, the number one use case long before generative
338
+ AI came along is the migration of applications one after another. to accelerated
339
+ computing. The second is, of course, the rentals. They''re renting capacity
340
+ to model makers. They''re renting it to startup companies. And a generative
341
+ AI company spends the vast majority of their invested capital into infrastructure
342
+ so that they could use an AI to help them create products. And so these companies
343
+ need it now. They just simply can''t afford, you know, you just raised money.
344
+ They want you to put it to use now. You have processing that you have to do.
345
+ You can''t do it next year. You got to do it today. And so there''s a fair,
346
+ that''s one reason. The second reason for hopper demand right now is because
347
+ of the race to the next plateau. The first person to the next plateau gets to
348
+ be, you know, gets to introduce a revolutionary level of AI. The second person
349
+ who gets there is incrementally, you know, better or about the same. And so
350
+ the ability to systematically and consistently race to the next plateau and
351
+ be the first one there is how you establish leadership. You know, NVIDIA is
352
+ constantly doing that, and we show that to the world in the GPUs we make and
353
+ the AI factories that we make. the networking systems that we make, the SOCs
354
+ we create. I mean, we want to set the pace. We want to be consistently the world''s
355
+ best. And that''s the reason why we drive ourselves so hard. Of course, we also
356
+ want to see our dreams come true and all of the capabilities that we imagine
357
+ in the future and the benefits that we can bring to society. We want to see
358
+ all that come true. And so... These model makers are the same. Of course, they
359
+ want to be the world''s best. They want to be the world''s first. And although
360
+ Blackwell will start shipping out in billions of dollars at the end of this
361
+ year, the standing up of the capacity is still probably weeks and a month or
362
+ so away. And so between now and then is a lot of generative AI market dynamic.
363
+ And so Everybody is just really in a hurry. It''s either operational reasons
364
+ that they need it. They need accelerated computing. They don''t want to build
365
+ any more general purpose computing infrastructure. And even Hopper, you know,
366
+ of course, H200 is state of the art. Hopper, if you have a choice between building
367
+ CPU infrastructure right now for business or Hopper infrastructure for business
368
+ right now, that decision is relatively clear. And so I think people are just
369
+ clamoring to transition the trillion dollars of established installed infrastructure
370
+ to a modern infrastructure in Hopper''s state of the art. And your next question
371
+ comes from the line of Matt Ramsey with TD Cowan. Your line is open. Thank you
372
+ very much. Good afternoon, everybody. wanted to kind of circle back to an earlier
373
+ question about the debate that investors are having about the ROI on all of
374
+ this capex. And hopefully this question and the distinction will make some some
375
+ sense. But what I''m what I''m having discussions about is with like the percentage
376
+ of folks that you see that are spending all this money and looking to sort of
377
+ push the frontier towards AGI convergence, and as you just said, a new plateau
378
+ in capability. And they''re going to spend regardless to get to that level of
379
+ capability because it opens up so many doors for the industry and for their
380
+ company versus customers that are really, really focused today on CapEx versus
381
+ ROI. I don''t know if that distinction makes sense. I''m just trying to get
382
+ a sense of how you''re seeing the priorities of people that are putting the
383
+ dollars in the ground on this new technology and what their priorities are and
384
+ their timeframes are for that investment. Thanks. Thanks, Matt. The people who
385
+ are investing in NVIDIA infrastructure are getting returns on it right away.
386
+ It''s the best ROI infrastructure, computing infrastructure investment you can
387
+ make today. And so, So one way to think through it, probably the easiest way
388
+ to think through it is just go back to first principles. You have a trillion
389
+ dollars worth of general purpose computing infrastructure. And the question
390
+ is, do you want to build more of that or not? And for every billion dollars
391
+ worth of general CPU based infrastructure that you stand up, you probably rent
392
+ it for less than a billion. And so because it''s commoditized, there''s already
393
+ a trillion dollars on the ground. What''s the point of getting more? And so
394
+ the people who are clamoring to get this infrastructure, one, when they build
395
+ out Hopper-based infrastructure and soon Blackwell-based infrastructure, they
396
+ start saving money. That''s tremendous return on investment. And the reason
397
+ why they start saving money is because data processing saves money. Data processing
398
+ is probably just a giant part of it already. And so recommender systems save
399
+ money. so on and so forth. And so you start saving money. The second thing is
400
+ everything you stand up are going to get rented because so many companies are
401
+ being founded to create generative AI. And so your capacity gets rented right
402
+ away. And the return on investment of that is really good. And then the third
403
+ reason is your own business. You want to either create the next frontier yourself
404
+ or your own internet services benefit from a next-generation ad system or a
405
+ next-generation recommender system or a next-generation search system. So for
406
+ your own services, for your own stores, for your own user-generated content,
407
+ social media platforms, for your own services, generative AI is also a fast
408
+ ROI. And so there''s a lot of ways you could think through it, but at the core,
409
+ it''s because It is the best computing infrastructure you could put in the ground
410
+ today. The world of general purpose computing is shifting to accelerated computing.
411
+ The world of human engineered software is moving to generative AI software.
412
+ If you were to build infrastructure to modernize your cloud and your data centers,
413
+ build it with accelerated computing and NVIDIA. That''s the best way to do it.
414
+ And your next question comes from the line of Timothy Arcuri with UBS. Your
415
+ line is open. Thanks a lot. I had a question on the shape of the revenue growth,
416
+ both near and longer term. I know, Colette, you did increase OPEX for the year.
417
+ And if I look at the increase in your purchase commitments and your supply obligations,
418
+ that''s also quite bullish. On the other hand, there''s some school of thought
419
+ that not that many customers really seem ready for liquid cooling. And I do
420
+ recognize that some of these racks can be air cooled. But, Jensen, is that something
421
+ to consider sort of on the shape of how Blackwell is going to ramp? And then
422
+ I guess when you look beyond next year, which is obviously going to be a great
423
+ year, and you look into 26, do you worry about any other gating factors like,
424
+ say, the power supply chain or at some point models start to get smaller? I''m
425
+ just wondering if you can speak to that. Thanks. I''m going to work backwards.
426
+ I really appreciate the question, Tim. So remember, the world is moving from
427
+ general purpose computing to accelerated computing. And the world builds about
428
+ a trillion dollars worth of data centers. You know, a trillion dollars worth
429
+ of data centers in a few years will be all accelerated computing. In the past,
430
+ no GPUs are in data centers, just CPUs. In the future, every single data center
431
+ will have GPUs. And the reason for that is very clear because we need to accelerate
432
+ workloads so that we can continue to be sustainable, continue to drive down
433
+ the cost of computing so that when we do more computing, we don''t experience
434
+ computing inflation. Second, we need GPUs for a new computing model called generative
435
+ AI that we can all acknowledge is going to be quite transformative to the future
436
+ of computing. And so I think working backwards, the way to think about that
437
+ is the next trillion dollars of the world''s infrastructure will clearly be
438
+ different than the last trillion, and it will be vastly accelerated. With respect
439
+ to the shape of our ramp, we offer multiple configurations of Blackwell. Blackwell
440
+ comes in either a Blackwell classic, if you will, that uses the HGX form factor
441
+ that we pioneered with Volta. And I think it was Volta. And so we''ve been shipping
442
+ the HGX form factor for some time. It is air cooled. The Grace Blackwell is
443
+ liquid cooled. However, the number of data centers that want to go liquid cooled
444
+ is quite significant. And the reason for that is because we can, in a liquid
445
+ cooled data center, In any data center, power limited data center, whatever
446
+ size data center you choose, you could install and deploy anywhere from three
447
+ to five times the AI throughput compared to the past. And so liquid cooling
448
+ is cheaper. Liquid cooling, our TCO is better. And liquid cooling allows you
449
+ to have the benefit of this capability we call MVLink, which allows us to expand
450
+ it to 72 Grace Blackwell packages, which has essentially 144 GPUs. And so imagine
451
+ 144 GPUs connected in MVLink, and we''re increasingly showing you the benefits
452
+ of that. And the next click is obviously very low latency, very high throughput,
453
+ large language model inference. The large MVLink domain is going to be a game
454
+ changer for that. And so I think people are very comfortable deploying both.
455
+ And so almost every CSP we''re working with are deploying some of both. And
456
+ so I''m pretty confident that we''ll ramp it up just fine. Your second question
457
+ out of the third is looking forward, yeah, next year is going to be a great
458
+ year. We expect to grow our data center business quite significantly next year.
459
+ Blackwell is going to be a complete game changer for the industry. And Blackwell
460
+ is going to carry into the following year. And as I mentioned earlier, working
461
+ backwards from first principles, remember that computing is going through two
462
+ platform transitions at the same time. And that''s just really, really important
463
+ to keep your mind focused on, which is general purpose computing is shifting
464
+ to accelerated computing and human engineered software is going to transition
465
+ to generative ai or artificial intelligence learned software okay and your next
466
+ question comes from the line of stacy raskin with bernstein research your line
467
+ is open hi guys thanks for taking my questions i have two short questions for
468
+ colette The first, several billion dollars of Blackwell revenue in Q4, is that
469
+ additive? You said you expected Hopper demand to strengthen in the second half.
470
+ Does that mean Hopper strengthens Q3 to Q4 as well on top of Blackwell adding
471
+ several billion dollars? And the second question on gross margins, if I have
472
+ mid-70s for the year, depending where I want to draw that, if I have 75 for
473
+ the year, I''d be something like 71 to 72. for Q4 somewhere in that range? Is
474
+ that the kind of exit rate for gross margins that you''re expecting? And how
475
+ should we think about the drivers of gross margin evolution in the next year
476
+ as Blackwell ramps and, I mean, hopefully, I guess the yields and the inventory
477
+ reserves and everything come up? This is Stacy. Let''s first take your question
478
+ that you had about Hopper and Blackwell. So we believe our Hopper will continue
479
+ to grow into the second half. We have many new products for Hopper, our existing
480
+ products for Hopper, that we believe will start continuing to ramp in the next
481
+ quarters, including our Q3 and those new products moving to Q4. So let''s say
482
+ Hopper, therefore, versus H1 is a growth opportunity for that. Additionally,
483
+ we have the Blackwell on top of that. and the Blackwell starting of ramping
484
+ in Q4. So, hope that helps you on those two pieces. Your second piece is in
485
+ terms of on our gross margin. We provided gross margin for our Q3. We provided
486
+ our gross margin on a non-GAAP at about 75. We''ll work with all the different
487
+ transitions that we''re going through, but we do believe we can do that 75 in
488
+ Q3. We provided that we''re still on track for the full year, also in the mid-70s
489
+ or approximately the 75. So we''re going to see some slight difference possibly
490
+ in Q4, again, with our transitions and the different cost structures that we
491
+ have on our new product introductions. However, I''m not in the same number
492
+ that you are there. We don''t have exactly guidance, but I do believe you''re
493
+ lower than where we are. And your next question comes from the line of Ben Reitzes
494
+ with Mellius. Your line is open. Yeah. Hey, thanks a lot for the question, Jensen
495
+ and Colette. I wanted to ask about the geographies. There was the 10Q that came
496
+ out and the United States was down sequentially while several Asian geographies
497
+ were up a lot sequentially. Just wondering what the dynamics are there and Um,
498
+ obviously China did very well. You mentioned it, your remarks, what are the
499
+ puts and takes? And then I just wanted to clarify from Stacy''s question, um,
500
+ if that means, uh, the sequential overall revenue growth rates for the company
501
+ accelerate in the fourth quarter, given all those favorable revenue dynamics.
502
+ Thanks. Let me talk about, um, a bit in terms of our disclosure, in terms of
503
+ the time queue, a required disclosure and a choice of geographies. Very challenging
504
+ sometimes to create that right disclosure as we have to come up with one key
505
+ piece. Pieces in terms of we have in terms of who we sell to and or specifically
506
+ who we invoice to. And so what you''re seeing in terms of there is who we invoice.
507
+ That''s not necessarily where the product will eventually be and where it may
508
+ even travel to the end customer. These are just moving to our OEMs, our ODMs,
509
+ and our system integrators for the most part across our product portfolio. So
510
+ what you''re seeing there is sometimes just a swift shift in terms of who they
511
+ are using to complete their full configuration before those things are going
512
+ into the data center, going into notebooks and those pieces of it. And that
513
+ shift happens from time to time. But yes, our China number there, our inverse
514
+ into China, keep in mind that is incorporating both gaming, also data center,
515
+ also automotive in those numbers that we have. Going back to your statement
516
+ regarding gross margin and also what we''re seeing in terms of what we''re looking
517
+ at for Hopper and Blackwell in terms of revenue. Hopper will continue to grow
518
+ in the second half, will continue to grow from what we are currently seeing.
519
+ During determining that exact mix In each Q3 and Q4, we don''t have here. We
520
+ are not here to guide yet in terms of Q4. But we do see right now the demand
521
+ expectations. We do see the visibility that that will be a growth opportunity
522
+ in Q4. On top of that, we will have our Blackwell architecture. And your next
523
+ question comes from the line of CJ Muse with Cantor Fitzgerald. Your line is
524
+ open. Yeah, good afternoon. Thank you for taking the question. You''ve embarked
525
+ on a remarkable annual product cadence with challenges only likely becoming
526
+ more and more given, you know, rising complexity and a radical limit in the
527
+ advanced package world. So curious, you know, if you take a step back, how does
528
+ this backdrop alter your thinking around potentially greater vertical integration,
529
+ supply chain partnerships, and then thinking through consequential impact to
530
+ your margin profile? Thank you. Yeah, thanks. Thanks. Let''s see. I think the
531
+ answer to your first question is that the reason why our velocity is so high
+ is simultaneously because the complexity of the model is growing, and we want
+ to continue to drive its cost down. It''s growing, so we want to continue to
+ increase its scale. And we believe that by continuing to scale the AI models,
+ that will reach a level of extraordinary usefulness and that it would open up,
+ realize the next industrial revolution. We believe it. And so we''re going to
+ drive ourselves really hard to continue to go up that scale. We have the ability,
+ fairly uniquely, to integrate to design an AI factory because we have all the
+ parts. It''s not possible to come up with a new AI factory every year unless
+ you have all the parts. And so next year, we''re going to ship a lot more CPUs
+ than we''ve ever had in the history of our company, more GPUs, of course, but
+ also NVLink switches, CX, DPUs, ConnectX DPU for East and West, Bluefield DPUs
+ for North and South and data and storage processing, to InfiniBand for supercomputing
+ centers, to Ethernet, which is a brand new product for us, which is well on
+ its way to becoming a multi-billion dollar business to bring AI to Ethernet.
+ The fact that we could build, we have access to all of this. We have one architectural
+ stack, as you know. It allows us to introduce new capabilities to the market
+ as we complete it. Otherwise, what happens, you ship these parts, you go find
+ customers to sell it to, and then you''ve got to build, somebody''s got to build
+ up an AI factory. And the AI factory has got a mountain of software. And so
+ it''s not about who integrates it. We love the fact that our supply chain is
+ disintegrated in the sense that we could service, you know, Quanta, Foxconn,
+ HP, Dell, Lenovo, Supermicro. We used to be able to service ZT. They were recently
+ purchased and so on and so forth. And so the number of ecosystem partners that
+ we have Gigabyte, ASUS, the number of ecosystem partners that we have that allows
+ them to take our architecture, which all works, but integrate it in a bespoke
+ way into all of the world''s cloud service providers, enterprise data centers.
+ The scale and reach necessary from our ODMs and our integrator supply chain
+ is vast and gigantic because the world is huge. And so that part we don''t want
+ to do and we''re not good at doing. But we know how to design the AI infrastructure,
+ provide it the way that customers would like it, and let the ecosystem integrate
+ it. Well, yeah. So anyways, that''s the reason why. And your final question
+ comes from the line of Aaron Rakers with Wells Fargo. Your line is open. Yes,
+ thanks for taking the question. I wanted to go back into the Blackwell product
+ cycle. One of the questions that we tend to get asked is how you see the rack
+ scale system mix dynamic as you think about leveraging NVLink. You think about
+ GB, you know, NVL72. And how that go-to-market dynamic looks as far as the Blackwell
+ product cycle. I guess I put distinctly, how do you see that mix of rack scale
+ systems as we start to think about the Blackwell cycle playing out? Yeah, Aaron,
+ thanks. The Blackwell rack system, it''s designed and architected as a rack,
+ but it''s sold in disaggregated system components. We don''t sell the whole
+ rack. And the reason for that is because everybody''s rack''s a little different,
+ surprisingly. Some of them are OCP standards, some of them are not. Some of
+ them are enterprise. And the power limits for everybody could be a little different.
+ Choice of CDUs, the choice of power bus bars, the configuration and integration
+ into people''s data centers, all different. So the way we designed it, we architected
+ the whole rack. The software is going to work perfectly across the whole rack.
+ And then we provide the system components. Like, for example, the CPU and GPU
+ compute board is then integrated into an MGX. It''s a modular system architecture.
580
+ MGX is completely ingenious. And we have MGX ODMs and integrators and OEMs all
+ over the planet. And so just about any configuration you would like, where you
+ would like that 3,000-pound rack to be delivered, it has to be integrated and
+ assembled close to the data center because it''s fairly heavy. And so everything
+ from the supply chain, from the moment that we ship the GPUs, CPUs, the switches,
+ the NICs. From that point forward, the integration is done quite close to the
+ location of the CSPs and the locations of the data centers. And so you can imagine
+ how many data centers in the world there are and how many logistics hubs we''ve
+ scaled out to with our ODM partners. And so I think because we show it as one
+ rack and because it''s always rendered that way, and shown that way, we might
+ have left the impression that we''re doing the integration. Our customers hate
+ that we do integration. The supply chain hates us doing integration. They want
+ to do the integration. That''s their value added. There''s a final design in,
+ if you will. It''s not quite as simple as shimmy into a data center, but that
+ design fit in is really complicated. And so the design fit in, the installation,
+ the bring up, the repair and replace, that entire cycle is done all over the
+ world. And we have a sprawling network of ODM and OEM partners that does this
+ incredibly well. So integration is not the reason why we''re doing racks,
+ it''s the anti-reason of doing it. We don''t want to be an integrator. We want
+ to be a technology provider. And I will now turn the call back over to Jensen
+ Huang for closing remarks. Thank you. Let me make a couple of comments that
+ I made earlier again. Data centers worldwide are in full steam to modernize
+ the entire computing stack with accelerated computing and generative AI. Hopper
+ demand remains strong, and the anticipation for Blackwell is incredible. Let
+ me highlight the top five things, the top five things of our company. Accelerated
+ computing has reached a tipping point. CPU scaling slows. Developers must accelerate
606
+ everything possible. Accelerated computing starts with CUDA-X libraries. New
+ libraries open new markets for NVIDIA. We released many new libraries, including
+ CUDA-X accelerated Polars, Pandas, and Spark, the leading data science and data
+ processing libraries. cuVS for vector databases. This is incredibly hot right
+ now. Aerial and Sionna for 5G wireless base stations. A whole suite of a whole
+ world of data centers that we can go into now. Parabricks for gene sequencing.
+ And AlphaFold2 for protein structure prediction is now CUDA accelerated. We
+ are at the beginning of our journey to modernize a trillion dollars worth of
+ data centers from general purpose computing to accelerated computing. That''s
+ number one. Number two, Blackwell is a step function leap over Hopper. Blackwell
+ is an AI infrastructure platform, not just a GPU. Also happens to be the name
+ of our GPU, but it''s an AI infrastructure platform. As we reveal more of Blackwell
+ and sample systems to our partners and customers, the extent of Blackwell''s
+ leap becomes clear. The Blackwell vision took nearly five years and seven one-of-a-kind
+ chips to realize. The Grace CPU, the Blackwell dual GPU in a CoWoS package, ConnectX
+ DPU for east-west traffic, Bluefield DPU for north-south and storage traffic,
+ NVLink switch for all-to-all GPU communications, and Quantum and Spectrum-X
+ for both InfiniBand and Ethernet, can support the massive burst traffic of AI.
+ Blackwell AI factories are building-sized computers. NVIDIA designed and optimized
+ the Blackwell platform full-stack, end-to-end, from chips, systems, networking,
+ even structured cables, power and cooling, and mountains of software to make
+ it fast for customers to build AI factories. These are very capital-intensive
+ infrastructures. Customers want to deploy it as soon as they get their hands
+ on the equipment and deliver the best performance and TCO. Blackwell provides
+ three to five times more AI throughput in a power-limited data center than Hopper.
+ The third is NVLink. This is a very big deal. Its all-to-all GPU switch is game-changing.
+ The Blackwell system lets us connect 144 GPUs in 72... GB200 packages into one
+ NVLink domain with an aggregate NVLink bandwidth of 259 terabytes per second
+ in one rack. Just put that in perspective, that''s about 10 times higher than
+ Hopper. 259 terabytes per second kind of makes sense because you need to boost
+ the training of multi-trillion parameter models on trillions of tokens. And
+ so that natural amount of data needs to be moved around from GPU to GPU. For
+ inference, NVLink is vital for low latency, high throughput, large language
+ model, token generation. We now have three networking platforms. NVLink for
+ GPU scale up, Quantum InfiniBand for supercomputing and dedicated AI factories,
+ and Spectrum-X for AI on Ethernet. NVLink''s networking footprint is much bigger
642
+ than before. Generative AI momentum is accelerating. Generative AI frontier
+ model makers are racing to scale to the next AI plateau to increase model safety
+ and IQ. We''re also scaling to understand more modalities from text, images,
+ and video to 3D, physics, chemistry, and biology. Chatbots, coding AIs, and
+ image generators are growing fast, but it''s just the tip of the iceberg. Internet
+ services are deploying generative AI for large-scale recommenders, ad targeting,
+ and search systems. AI startups are consuming tens of billions of dollars yearly
+ of CSPs'' cloud capacity, and countries are recognizing the importance of AI
+ and investing in sovereign AI infrastructure. And NVIDIA Omniverse is opening
+ up the next era of AI, general robotics. And now the enterprise AI wave has
+ started, and we''re poised to help companies transform their businesses. The
+ NVIDIA AI Enterprise Platform consists of NeMo, NIMs, NIM Agent Blueprints,
+ and AI Foundry that our ecosystem partners, the world-leading IT companies,
+ use to help companies customize AI models and build bespoke AI applications.
+ Enterprises can then deploy on NVIDIA AI Enterprise runtime. And at $4,500 per
+ GPU per year, NVIDIA AI Enterprise is an exceptional value for deploying AI
+ anywhere. And NVIDIA''s software TAM can be significant as the CUDA-compatible
+ GPU install base grows from millions to tens of millions. And as Colette mentioned,
+ NVIDIA software will exit the year at a $2 billion run rate. Thank you all for
+ joining us today. And ladies and gentlemen, this concludes today''s call, and
+ we thank you for your participation. You may now disconnect.", "questions_and_answers":
+ null}'
664
+ content_type: text/plain
+ headers:
+   Age: '11801'
+   Cache-Control: public, max-age=43200
+   ETag: W/"0178672f2a25592fd79f53bdd2fa291c"
+   Last-Modified: Thu, 06 Mar 2025 23:05:28 GMT
+   Transfer-Encoding: chunked
+   Vary: accept-encoding
+   Via: 1.1 6b4ec497582bb023ddaea1dc41e55db6.cloudfront.net (CloudFront)
+   X-Amz-Cf-Id: 2smAyLzbkNStGuiv8Fm-UOrFTb_ymL6mTsDbKEoSG2p7Trm-FmONAw==
+   X-Amz-Cf-Pop: DFW57-P4
+   X-Cache: Hit from cloudfront
+   x-amz-server-side-encryption: AES256
+ method: GET
+ status: 200
+ url: https://v2.api.earningscall.biz/transcript?apikey=demo&exchange=NASDAQ&symbol=NVDA&year=2025&quarter=2&level=4
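
The recorded exchange above is a plain GET against the transcript endpoint. A minimal sketch (not part of the package) of reproducing it with `requests`, assuming the `demo` apikey shown in the fixture still resolves for NVDA:

```python
import requests

# Same request the fixture records: NVDA Q2 FY2025 at transcript level 4.
resp = requests.get(
    "https://v2.api.earningscall.biz/transcript",
    params={
        "apikey": "demo",
        "exchange": "NASDAQ",
        "symbol": "NVDA",
        "year": 2025,
        "quarter": 2,
        "level": 4,
    },
)
resp.raise_for_status()
# Body is JSON even though the recorded content_type is text/plain.
payload = resp.json()
# For this event the payload ends with "questions_and_answers": null.
print(payload["questions_and_answers"])  # -> None
```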
@@ -206,6 +206,29 @@ def test_get_demo_company_with_advanced_transcript_data_level_4():
      assert transcript.speakers is None


+ @responses.activate
+ def test_get_demo_company_with_advanced_transcript_data_level_4_q_and_a_is_missing():
+     ##
+     responses._add_from_file(file_path=data_path("symbols-v2.yaml"))
+     responses._add_from_file(file_path=data_path("nvda-q2-2025-level-4-data-missing.yaml"))
+     ##
+     company = get_company("nvda")
+     ##
+     transcript = company.get_transcript(year=2025, quarter=2, level=4)
+     ##
+     assert transcript.event.year == 2025
+     assert transcript.event.quarter == 2
+     assert transcript.event.conference_date.isoformat() == "2024-08-28T17:00:00-04:00"
+     assert transcript.text[:100] == (
+         "Good afternoon, everyone, and welcome to NVIDIA's conference call for the second quarter of fiscal 2"
+     )
+     assert transcript.prepared_remarks[:100] == (
+         "Good afternoon, everyone, and welcome to NVIDIA's conference call for the second quarter of fiscal 2"
+     )
+     assert transcript.questions_and_answers is None
+     assert transcript.speakers is None
+
+
  @responses.activate
  def test_get_non_demo_company():
      ##
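
Outside the test harness, the behavior the new test pins down looks like the sketch below. This is a usage sketch, not library documentation; it assumes the public `get_company` helper the tests import, and its point is that even a level-4 transcript may carry prepared remarks only, so `questions_and_answers` and `speakers` must be treated as optional:

```python
from earningscall import get_company

company = get_company("nvda")
transcript = company.get_transcript(year=2025, quarter=2, level=4)

print(transcript.event.conference_date.isoformat())
print(transcript.prepared_remarks[:100])
if transcript.questions_and_answers is None:
    # The server returned "questions_and_answers": null for this event;
    # fall back to the combined transcript.text instead.
    print("No Q&A section available for this event")
```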
@@ -393,9 +416,9 @@ def test_get_company_fails_not_authorized():
  # from responses import _recorder
  #
  #
- # @_recorder.record(file_path="data/aapl-q1-2022-speaker-name-map-v2-blah.yaml")
+ # @_recorder.record(file_path="data/nvda-q2-2025-level-4-data-missing.yaml")
  # def test_save_symbols_v1_first():
- # requests.get("https://v2.api.earningscall.biz/transcript?apikey=demo&exchange=NASDAQ&symbol=AAPL&year=2023&quarter=1&level=2")
+ # requests.get("https://v2.api.earningscall.biz/transcript?apikey=demo&exchange=NASDAQ&symbol=NVDA&year=2025&quarter=2&level=4")

  # Uncomment and run following code to generate demo-symbols-v2.yaml file
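
The commented-out stub above is how the new fixture appears to have been captured. A re-recording sketch along those lines, with the caveat that `responses._recorder` is a private, underscore-prefixed utility whose interface may change between releases of the `responses` library:

```python
import requests
from responses import _recorder  # private API; pin the responses version

# Hypothetical helper name; hitting the live endpoint writes the
# request/response pair into the YAML fixture file.
@_recorder.record(file_path="data/nvda-q2-2025-level-4-data-missing.yaml")
def record_nvda_q2_2025_level_4():
    requests.get(
        "https://v2.api.earningscall.biz/transcript"
        "?apikey=demo&exchange=NASDAQ&symbol=NVDA&year=2025&quarter=2&level=4"
    )

record_nvda_q2_2025_level_4()
```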
 