-
Notifications
You must be signed in to change notification settings - Fork 11
Expand file tree
/
Copy pathgenerate-presentation.js
More file actions
executable file
·1960 lines (1685 loc) · 69 KB
/
generate-presentation.js
File metadata and controls
executable file
·1960 lines (1685 loc) · 69 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
#!/usr/bin/env node
/**
* Presentation Generation Script
*
* Generates presentation slides from MDX course content using AI:
* 1. Parses MDX lesson content
* 2. Uses Claude Code CLI (Opus 4.5) to condense into presentation slides
* 3. Generates structured JSON with slides, speaker notes, and metadata
*
* Modes:
* - Default: Interactive file selection → generate presentation
* - --all: Batch process all files
* - --file <path>: Process specific file
* - --module <name>: Process all files in module directory
* - --debug: Save prompt for validation
*
* Usage:
* node scripts/generate-presentation.js # Interactive
* node scripts/generate-presentation.js --all # Batch: all files
* node scripts/generate-presentation.js --file intro.md # Specific file
*/
import {
readFileSync,
writeFileSync,
mkdirSync,
readdirSync,
statSync,
existsSync,
unlinkSync,
} from "fs";
import { join, relative, dirname, basename, extname } from "path";
import { fileURLToPath } from "url";
import { spawn } from "child_process";
import * as readline from "readline";
import { parseMarkdownContent } from "./lib/markdown-parser.js";
// ES module __dirname equivalent (import.meta.url → filesystem path)
const __filename = fileURLToPath(import.meta.url);
const __dirname = dirname(__filename);
// Configuration
// Root of the MDX course content that presentations are generated from.
const DOCS_DIR = join(__dirname, "../website/docs");
// Working directory for generated presentation JSON files.
const OUTPUT_DIR = join(__dirname, "output/presentations");
// Published location served by the website's static assets.
const STATIC_OUTPUT_DIR = join(__dirname, "../website/static/presentations");
// Presumably an index of generated presentations — written later in this script; confirm against usage.
const MANIFEST_PATH = join(OUTPUT_DIR, "manifest.json");
// Renderer source file; its VISUAL_COMPONENTS object is the single source
// of truth for which visual component names are valid in slides.
const REVEAL_SLIDESHOW_PATH = join(
  __dirname,
  "../website/src/components/PresentationMode/RevealSlideshow.tsx",
);
// Memoized result of getValidVisualComponents(); the registry file is
// static for the lifetime of one script run, and the function is called
// multiple times while building a single prompt.
let cachedVisualComponents = null;

/**
 * Extract valid visual component names from RevealSlideshow.tsx.
 * Single source of truth: the VISUAL_COMPONENTS object in the renderer.
 *
 * The parsed list is cached after the first successful read.
 *
 * @returns {string[]} Registered visual component names (the object's keys)
 * @throws {Error} If the VISUAL_COMPONENTS object cannot be located
 */
function getValidVisualComponents() {
  if (cachedVisualComponents !== null) {
    return cachedVisualComponents;
  }
  const content = readFileSync(REVEAL_SLIDESHOW_PATH, "utf-8");
  // NOTE(review): [^}]+ assumes the object body contains no nested braces —
  // true for a flat `Name: Component` mapping; confirm if the registry grows.
  const match = content.match(/const VISUAL_COMPONENTS = \{([^}]+)\}/);
  if (!match) {
    throw new Error(
      "Could not find VISUAL_COMPONENTS in RevealSlideshow.tsx",
    );
  }
  // Extract component names (the keys of the object literal): split entries
  // on commas, drop blanks and comment lines, keep text before the colon.
  cachedVisualComponents = match[1]
    .split(",")
    .map((line) => line.trim())
    .filter((line) => line && !line.startsWith("//"))
    .map((line) => line.split(":")[0].trim());
  return cachedVisualComponents;
}
// Parse command-line arguments
/**
 * Parse CLI flags into a run configuration.
 *
 * Recognized flags: --all, --file <path>, --module <name>, --debug.
 * Any of --all/--file/--module switches mode from "interactive" to "batch".
 * Unrecognized arguments are ignored (existing behavior, kept for
 * compatibility with wrapper scripts).
 *
 * @returns {{mode: string, file: string|null, module: string|null, debug: boolean}}
 * @throws {Error} If --file or --module is given without a value
 */
function parseArgs() {
  const args = process.argv.slice(2);
  const config = {
    mode: "interactive",
    file: null,
    module: null,
    debug: false,
  };
  for (let i = 0; i < args.length; i++) {
    const arg = args[i];
    if (arg === "--all") {
      config.mode = "batch";
    } else if (arg === "--file") {
      config.mode = "batch";
      config.file = args[++i];
      // Fail loudly instead of carrying an undefined path into batch mode.
      if (config.file === undefined) {
        throw new Error("--file requires a path argument");
      }
    } else if (arg === "--module") {
      config.mode = "batch";
      config.module = args[++i];
      // Same guard as --file: a flag without a value is an operator error.
      if (config.module === undefined) {
        throw new Error("--module requires a module name argument");
      }
    } else if (arg === "--debug") {
      config.debug = true;
    }
  }
  return config;
}
// ============================================================================
// CONTENT PARSING - Uses shared markdown-parser library
// ============================================================================
// (Functions imported from ./lib/markdown-parser.js)
// ============================================================================
// PRESENTATION PROMPT
// ============================================================================
/**
 * Generate presentation slides prompt optimized for Claude Opus 4.5.
 *
 * The returned prompt embeds the lesson content, the registered visual
 * component list (read from RevealSlideshow.tsx), and the output path the
 * model is instructed to write its JSON to.
 *
 * @param {string} content - Parsed MDX lesson content (with code/visual markers)
 * @param {string} fileName - Lesson title, interpolated into the prompt
 * @param {string} outputPath - Path Claude must write the presentation JSON to
 * @returns {string} The complete prompt for the Claude CLI
 */
function buildPresentationPrompt(content, fileName, outputPath) {
  // Hoisted: the component list is interpolated in two places below, and
  // getValidVisualComponents() reads the renderer file — parse it once.
  const validComponents = getValidVisualComponents();
  return `You are a presentation script writer specializing in educational content for senior software engineers.
TASK: Convert the technical course material below into a concise, visual presentation format for classroom teaching.
TARGET AUDIENCE:
Senior software engineers (3+ years experience) who want practical, production-focused insights. They don't need hand-holding.
PRESENTATION CONTEXT:
This presentation will be shown in a classroom using Reveal.js. The instructor needs:
- Clear, visual slides with minimal text (bullet points, not paragraphs)
- Speaker notes with detailed talking points and timing guidance
- Code examples that demonstrate key concepts
- Logical flow with clear transitions
PRESENTATION STRUCTURE REQUIREMENTS:
✓ DO: Create 8-15 slides total (no more, no less)
✓ DO: Each slide should cover ONE key concept or example
✓ DO: Use bullet points with 3-5 items per slide maximum
✓ DO: Include transitions between conceptual sections
✓ DO: Add speaker notes with:
  - Talking points that expand on slide content
  - Timing guidance (e.g., "Spend 2-3 minutes on this")
  - Discussion prompts or questions to ask students
  - Real-world examples to reference
✓ DO: Preserve important code examples as slide content
✓ DO: ONLY use these registered visual components: ${validComponents.join(", ")}
✗ DO NOT: Invent or reference visual components not in this list
✓ DO: Generate exactly 4 learning objectives (no more, no less)
✓ DO: Keep each learning objective to 5 words or fewer - THIS IS STRICTLY ENFORCED
  - Good: "Master active context engineering" (4 words) ✓
  - Bad: "Learn how to master active context" (6 words) ✗
✗ AVOID: Long paragraphs on slides (slides are visual anchors, not reading material)
✗ AVOID: More than 5 bullet points per slide
✗ AVOID: Redundant slides covering the same concept
✗ AVOID: Skipping critical concepts from the lesson
✗ AVOID: Technical jargon without context in speaker notes
SLIDE TYPES:
1. **Title Slide**: Lesson title, learning objectives
2. **Concept Slide**: Key idea with 3-5 bullet points
3. **Code Example Slide**: Code snippet with context
4. **Code Comparison Slide**: Side-by-side code examples (especially for prompt examples)
5. **Code Execution Slide**: Step-by-step visualization of execution flows (agent loops, algorithms, workflows)
6. **Comparison Slide**: Effective vs ineffective patterns (bullet points)
7. **Visual Slide**: ONLY when source has [VISUAL_COMPONENT: X] marker - NEVER invent components
8. **Key Takeaway Slide**: Summary of section or lesson
HANDLING CODE BLOCKS:
The source material includes code blocks tagged like:
- [INEFFECTIVE CODE EXAMPLE: ...] - Shows what NOT to do
- [EFFECTIVE CODE EXAMPLE: ...] - Shows the correct approach
- [CODE PATTERN: ...] - Demonstrates a structure
- [CODE EXAMPLE: ...] - General code reference
For presentation slides:
✓ Include the most illustrative code examples (2-4 per presentation)
✓ Add context in speaker notes about what the code demonstrates
✓ For comparison slides, show ineffective and effective side-by-side
✓ Keep code snippets under 15 lines for readability
✓ EXCEPTION: Textual context flow examples showing agent conversation flows should use "codeExecution" slide type regardless of length (see section below)
✗ Don't include every code example from the lesson
✗ Don't show code without explaining its purpose
HANDLING MARKDOWN TABLES (CRITICAL):
Source material may contain markdown tables (pipe/dash syntax like | Col1 | Col2 |).
Tables are NOT a valid slide format—they're unreadable at presentation scale and
violate the one-concept-per-slide principle.
ABSOLUTE RULE: NEVER put pipe/dash table syntax in code or codeComparison slides.
NEVER use language="markdown" for content containing table structures.
Instead, distill tables into appropriate slide types:
1. Two-category tables → "comparison" slide (neutral=true if both sides are valid)
   Extract the key insight from each row as a bullet point.
2. List-of-items tables → "concept" slide
   Each row becomes one bullet capturing the essential insight.
3. Sequential/flow tables → "codeExecution" slide
   Each row becomes a step with appropriate highlightType.
4. Good-vs-bad tables → "comparison" slide (evaluative, neutral=false)
   Left = weak approach, Right = strong approach.
Put the FULL original table in speakerNotes.talkingPoints for instructor reference.
The slide shows the distilled insight; the notes provide the data.
NOTE: language="markdown" IS valid for prompt templates showing what to write to an
AI (e.g., CLAUDE.md examples). It is ONLY prohibited for pipe/dash table content.
CODE FORMATTING FOR PRESENTATIONS:
✓ Include natural line breaks in code and text (use \\n for newlines in JSON strings)
✓ Use standard formatting - post-processing will optimize line lengths automatically
✓ Preserve semantic meaning and don't break mid-identifier or mid-word
✓ DO NOT output literal newlines in JSON strings - always use the \\n escape sequence
NOTE: Line length optimization (60-char limit) is handled automatically by post-processing.
Focus on preserving the exact content and structure from the source material.
<CRITICAL_CONSTRAINT>
SOURCE MATERIAL VERIFICATION
Before generating any slide containing code:
1. Read the source material carefully to locate the code
2. Verify all code examples exist verbatim in the source markdown
3. Copy code exactly as it appears - character for character
ABSOLUTE PROHIBITIONS:
✗ DO NOT generate hypothetical code examples that aren't in the source
✗ DO NOT fabricate implementations that "would result" from prompts shown in the lesson
✗ DO NOT create code to demonstrate "what the AI would produce"
✗ DO NOT show resulting code unless it explicitly exists in the source markdown
CORRECT BEHAVIOR:
✓ If the lesson shows ONLY a prompt example (text describing what to ask AI):
  → Show the prompt as-is using code/codeComparison type with language: "text"
  → DO NOT generate the code that would result from that prompt
  → The prompt itself IS the educational example
✓ If the lesson shows BOTH a prompt AND its resulting code:
  → Show both exactly as they appear in the source
  → Verify both exist in the source before including
✓ If the lesson shows code WITHOUT a preceding prompt:
  → Show the code exactly as it appears in the source
  → Verify it exists before including
EXAMPLES:
❌ WRONG - Fabricating implementation:
Source contains: "Write a TypeScript function that validates email addresses per RFC 5322..."
Slide shows: Complete validateEmail() function with regex and edge case handling
Problem: The implementation was FABRICATED - it doesn't exist in the source
✅ CORRECT - Showing only what exists:
Source contains: "Write a TypeScript function that validates email addresses per RFC 5322..."
Slide shows: The prompt text only (type: "code", language: "text")
Result: Authentic prompt example preserved, no fabrication
Remember: You are extracting and organizing existing content, NOT generating new examples.
</CRITICAL_CONSTRAINT>
<MANDATORY_RULES>
CRITICAL: PRESERVING PROMPT EXAMPLES
When the source material includes prompt examples (text showing what to write to an AI coding assistant):
✓ PRESERVE EXACTLY as shown—do NOT paraphrase, rewrite, or summarize
✓ Use "code" or "codeComparison" slide types, NEVER bullet points
✓ Set language to "text" or "markdown" for prompt examples
✓ Include the FULL prompt text with exact formatting and line breaks
✓ Copy verbatim from source—these are educational examples showing structure
Examples of prompt text that MUST be preserved as code:
- "Write a TypeScript function that validates email addresses..."
- "You are a security engineer. Review this code for..."
- "Calculate the optimal cache size for 1M users..."
For comparison slides showing ineffective vs effective prompts:
✓ Use "codeComparison" type with leftCode/rightCode fields
✓ Set language to "text" for both sides
✓ Copy the EXACT prompt text from the source markdown
✗ Do NOT convert prompts to bullet points
✗ Do NOT summarize or paraphrase prompt text
✗ Do NOT rewrite for "presentation style"—preserve authenticity
Example structure for prompt comparisons:
{
  "type": "codeComparison",
  "title": "Imperative Commands: Ineffective vs Effective",
  "leftCode": {
    "label": "Ineffective",
    "language": "text",
    "code": "Could you help me write a function to validate email addresses?\nThanks in advance!"
  },
  "rightCode": {
    "label": "Effective",
    "language": "text",
    "code": "Write a TypeScript function that validates email addresses per RFC 5322.\nHandle edge cases:\n- Multiple @ symbols (invalid)\n- Missing domain (invalid)\n- Plus addressing (valid)\n\nReturn { valid: boolean, reason?: string }"
  },
  "speakerNotes": { ... }
}
</MANDATORY_RULES>
COMMON MISTAKE - DO NOT DO THIS:
❌ WRONG - Converting prompts to bullet points:
{
  "type": "concept",
  "title": "Action Verbs and Specificity",
  "content": [
    "Write (not make) → establishes code pattern",
    "Debug X in File.ts:47 (not fix) → pinpoints scope",
    "Add JSDoc to exported functions (not improve docs) → defines scope"
  ]
}
✅ CORRECT - Using codeComparison for prompts:
{
  "type": "codeComparison",
  "title": "Action Verbs and Specificity",
  "leftCode": {
    "label": "Weak",
    "language": "text",
    "code": "Make a function\nFix the bug\nUpdate the docs\nImprove performance"
  },
  "rightCode": {
    "label": "Strong",
    "language": "text",
    "code": "Write a function\nDebug the null pointer exception in UserService.ts:47\nAdd JSDoc comments to all exported functions in auth.ts\nOptimize the query to use indexed columns"
  }
}
If you see prompt examples in the source (text showing what to write to an AI), you MUST use "code" or "codeComparison" slide types. NEVER use "concept" with bullet points for prompts.
<VERIFICATION_PROTOCOL>
CHAIN-OF-THOUGHT FOR CODE SLIDES
For each code example you consider including in a slide, follow this verification process:
Step 1: LOCATE - Where does this code appear in the source material?
→ Identify the section and approximate line range
→ If you cannot find it, STOP - do not include this code
Step 2: EXTRACT - Copy the exact text from the source
→ Character-by-character match
→ Preserve all whitespace, formatting, and syntax
Step 3: VERIFY - Does your extracted code match what you're about to include?
→ Compare character by character
→ If there's any mismatch, re-extract from source
Step 4: CONFIRM - Is this code explicitly in the source, or did you generate it?
→ If you generated it (even to "demonstrate" something), DELETE IT
→ Only include code that passed Steps 1-3
Apply this process to:
- Every "code" slide
- Every "codeComparison" leftCode and rightCode
- Every "codeExecution" step that contains code
This verification prevents fabrication and ensures educational integrity.
</VERIFICATION_PROTOCOL>
COMPONENT DETECTION (CRITICAL):
The source content contains markers for visual React components in the format:
[VISUAL_COMPONENT: ComponentName]
Examples you will see:
- [VISUAL_COMPONENT: AbstractShapesVisualization]
- [VISUAL_COMPONENT: CapabilityMatrix]
- [VISUAL_COMPONENT: UShapeAttentionCurve]
- [VISUAL_COMPONENT: ContextWindowMeter]
**MANDATORY RULE:** When you encounter a [VISUAL_COMPONENT: X] marker, you MUST:
1. Generate a "visual" slide type (NOT a "concept" slide)
2. Set "component" field to the exact component name from the marker
3. Use the surrounding context to write a VALUE-FOCUSED caption
**CAPTION REQUIREMENTS:**
CRITICAL: Captions must be EXACTLY ONE SHORT SENTENCE capturing the core essence.
- Length: 8-15 words maximum
- No statistics, percentages, or numerical data
- Less is more—distill to the single most important insight
✓ Capture the core conceptual essence in one breath
✓ Be direct and concise—what's the ONE key insight?
✓ Focus on impact or principle, not detailed explanation
✗ Don't include multiple points or list consequences
✗ Don't use numbers, statistics, or percentages
✗ Don't just describe what's shown (that's the title's job)
Example:
{
  "type": "visual",
  "title": "Context and Agent Behavior",
  "component": "AbstractShapesVisualization",
  "caption": "Clean context prevents agent hallucinations"
}
**DO NOT:**
- Convert component markers into text bullet points
- Skip component markers
- Change the component name
- Generate a "concept" slide when you see a component marker
If you see [VISUAL_COMPONENT: X] anywhere in the content, it MUST become a visual slide.
CRITICAL CONSTRAINT: NEVER create a "visual" slide type unless there is an explicit [VISUAL_COMPONENT: X] marker in the source content. Do NOT invent visual components. If no marker exists, use "concept", "comparison", or "codeExecution" slide types instead.
CODE EXECUTION SLIDES:
Use the "codeExecution" slide type to visualize step-by-step processes like:
- Agent execution loops (human input → LLM prediction → agent execution → feedback)
- Algorithm execution flows
- Request/response cycles
- Multi-step workflows
Structure with highlightType for semantic color-coding (uses design system colors):
- **"human"** (white/neutral): Engineer/operator input, commands, task specifications, explicit constraints
- **"prediction"** (purple): LLM predictions, reasoning, decisions, "I will..." or "I should..." statements
- **"execution"** (green): Agent/software tool calls, deterministic actions (Read, Edit, Bash commands)
- **"feedback"** (purple light): Data/results returned from operations, outputs that LLM receives
- **"summary"** (white/neutral): Loop conditions, conclusions, final outcomes
SEMANTIC RULES (critical for correct color coding):
✓ "Engineer specifies task:" → human (operator input)
✓ "LLM predicts:" or "LLM decides:" → prediction (thinking/planning)
✓ "Agent executes: Read(...)" → execution (tool call)
✓ "File content returned:" → feedback (operation result)
✓ "LLM receives and predicts:" → prediction (NOT feedback - it's the prediction after receiving)
✓ "Loop continues until..." → summary (loop condition)
✓ Use for "how it works" explanations (3-8 steps typical)
✓ Include annotations to explain WHY each step happens
✓ Show the complete cycle from start to finish
✓ Maintain semantic consistency: what's DOING the action determines the type
✗ Don't use for static code examples (use "code" type instead)
✗ Don't create more than 10 steps (split into multiple slides if needed)
✗ Don't confuse "LLM receives data and predicts" (prediction) with "data returned" (feedback)
RECOGNIZING TEXTUAL CONTEXT FLOW PATTERNS (CRITICAL):
When you see code blocks showing conversation/execution flows with patterns like:
- "SYSTEM: ... USER: ... ASSISTANT: ... TOOL_RESULT: ..."
- Sequential back-and-forth between human, LLM, and tools
- Full execution traces showing how text flows through agent context
- Examples demonstrating the actual content of the context window
→ These are PEDAGOGICALLY CRITICAL and must be included as "codeExecution" slides
Why these matter MORE than config examples:
- They show the fundamental mental model of how agents operate
- They demystify what "context" actually contains
- They're the core learning insight, not just implementation details
How to handle them:
1. Break the flow into 8-12 logical steps (not necessarily every line)
2. Map conversation elements to highlightTypes:
   - "SYSTEM:" or system instructions → human
   - "USER:" or task specification → human
   - "ASSISTANT:" thinking/reasoning → prediction
   - "<tool_use>" or tool calls → execution
   - "TOOL_RESULT:" or outputs → feedback
3. Add annotations explaining the significance of each step
4. Focus on the FLOW of text through the context, not just the code
Example transformation:
- Source: 67-line conversation showing full agent execution
- Slide: 10 steps highlighting key moments in the conversation flow
- Annotations: "Notice how the tool result becomes input to the next prediction"
PRIORITIZATION: Textual flow examples showing context mechanics trump configuration
examples like MCP setup. Configuration is implementation; textual flow is understanding.
SPEAKER NOTES GUIDELINES:
For each slide, provide speaker notes with:
1. **Talking points**: What to say (2-4 sentences)
   ✓ Explain what IS shown on the slide
   ✗ Do NOT describe "what the model would generate" for prompts
   ✗ Do NOT fabricate hypothetical outcomes or implementations
2. **Timing**: Estimated time to spend (e.g., "2 minutes")
3. **Discussion prompts**: Questions to engage students
4. **Real-world context**: Production scenarios to reference
5. **Transition**: How to move to next slide
CRITICAL CONSTRAINTS FOR SPEAKER NOTES:
✗ NEVER say "Notice what the model generated" when showing prompt examples alone
✗ NEVER describe hypothetical code that would result from a prompt (unless that code exists in the source)
✗ NEVER fabricate examples or scenarios not present in the source material
✓ Focus on explaining the content that IS on the slide
✓ Reference only examples and code that exist in the source
Example speaker notes:
\`\`\`
Talking points: This slide shows the difference between vague and specific prompts. The vague version gives no context, forcing the AI to guess. The specific version provides language, standard, edge cases, and return type.
Timing: 3-4 minutes - this is a critical concept
Discussion: Ask students to share examples of vague prompts they've used. Have them identify what's missing.
Real-world: In production, vague prompts lead to code that compiles but doesn't meet requirements. Specific prompts reduce iteration cycles from 5+ to 1-2.
Transition: "Now that we understand specificity, let's look at how to structure prompts for different tasks..."
\`\`\`
OUTPUT FORMAT:
You must generate a valid JSON file with this structure:
REMINDER: If the source contains prompt examples (text showing what to write to an AI coding assistant), you MUST use "code" or "codeComparison" slide types with language="text". NEVER convert prompts to bullet points in "concept" slides.
{
  "metadata": {
    "title": "Lesson Title",
    "lessonId": "lesson-id",
    "estimatedDuration": "30-45 minutes",
    "learningObjectives": [
      "Master active context engineering",
      "Review agent plans before execution",
      "Set up parallel workflows",
      "Identify and prevent hallucinations"
    ]
  },
  "slides": [
    {
      "type": "title",
      "title": "Lesson Title",
      "subtitle": "Brief tagline",
      "content": [],
      "speakerNotes": {
        "talkingPoints": "...",
        "timing": "1 minute",
        "discussion": "...",
        "context": "...",
        "transition": "..."
      }
    },
    {
      "type": "concept",
      "title": "Slide Title",
      "content": [
        "Bullet point 1",
        "Bullet point 2",
        "Bullet point 3"
      ],
      "speakerNotes": { ... }
    },
    {
      "type": "code",
      "title": "Code Example Title",
      "language": "typescript",
      "code": "function example() { ... }",
      "caption": "Brief explanation",
      "speakerNotes": { ... }
    },
    {
      "type": "codeExecution",
      "title": "Agent Execution Loop Example",
      "steps": [
        {
          "line": "Engineer specifies: 'Add authentication middleware'",
          "highlightType": "human",
          "annotation": "Human provides explicit task and constraints"
        },
        {
          "line": "LLM predicts: 'I should read existing auth patterns'",
          "highlightType": "prediction",
          "annotation": "Token prediction drives next action"
        },
        {
          "line": "Agent executes: Read(src/middleware/auth.ts)",
          "highlightType": "execution",
          "annotation": "Deterministic tool execution"
        },
        {
          "line": "File content returned to context",
          "highlightType": "feedback",
          "annotation": "Operation result available to LLM"
        },
        {
          "line": "LLM analyzes patterns and predicts: 'I'll use JWT approach'",
          "highlightType": "prediction",
          "annotation": "Prediction incorporates new context"
        },
        {
          "line": "Agent executes: Edit(src/app.ts, old, new)",
          "highlightType": "execution",
          "annotation": "Code modification"
        },
        {
          "line": "Loop continues until tests pass",
          "highlightType": "summary",
          "annotation": "Iteration condition"
        }
      ],
      "speakerNotes": { ... }
    },
COMPARISON SLIDE CONVENTION (CRITICAL - STYLING RULES):
The comparison slide type has TWO variants based on the "neutral" flag:
1. EVALUATIVE (default, neutral=false or omitted):
   - LEFT side → RED background, RED heading, ✗ icons (ineffective/worse/limited)
   - RIGHT side → GREEN background, GREEN heading, ✓ icons (effective/better/superior)
   - Use when one approach is clearly inferior to the other
   - Examples: "Ineffective vs Effective", "Traditional vs Modern", "Limited vs Superior"
2. NEUTRAL (neutral=true):
   - BOTH sides → PURPLE background, PURPLE heading, → arrows (both valid options)
   - Use for architectural trade-offs where both options are valid but have different characteristics
   - Examples: "Autonomous vs Structured sub-agents", "Synchronous vs Asynchronous APIs"
FOR EVALUATIVE COMPARISONS, YOU MUST ALWAYS follow this convention:
- LEFT: The worse/ineffective/traditional/limited approach
- RIGHT: The better/effective/modern/superior approach
Correct examples:
- "Chat Interface" (left) vs "Agent Workflow" (right)
- "Heavy Mocking" (left) vs "Sociable Tests" (right)
- "Chat/IDE Agents" (left) vs "CLI Agents" (right)
- "Traditional RAG" (left) vs "Agentic RAG" (right)
INCORRECT: Putting the better option on the left will show it with RED ✗ styling!
    {
      "type": "comparison",
      "title": "Ineffective vs Effective",
      "left": {
        "label": "Ineffective", // MANDATORY: LEFT = worse/ineffective/limited (RED ✗)
        "content": ["Point 1", "Point 2"]
      },
      "right": {
        "label": "Effective", // MANDATORY: RIGHT = better/effective/superior (GREEN ✓)
        "content": ["Point 1", "Point 2"]
      },
      "speakerNotes": { ... }
    },
    {
      "type": "comparison",
      "title": "Architectural Trade-offs: Option A vs Option B",
      "neutral": true, // OPTIONAL: Use neutral=true for valid trade-offs (PURPLE neutral styling)
      "left": {
        "label": "Option A", // Both options valid - neutral styling
        "content": ["Point 1", "Point 2"]
      },
      "right": {
        "label": "Option B", // Both options valid - neutral styling
        "content": ["Point 1", "Point 2"]
      },
      "speakerNotes": { ... }
    },
CODE COMPARISON SLIDE (codeComparison):
Like regular comparison slides, codeComparison also supports the "neutral" flag:
1. EVALUATIVE (default, neutral=false or omitted):
   - LEFT side → RED background, RED heading (ineffective/worse code)
   - RIGHT side → GREEN background, GREEN heading (effective/better code)
   - Use when one code example is clearly inferior
2. NEUTRAL (neutral=true):
   - BOTH sides → PURPLE background, PURPLE heading (both valid approaches)
   - Use when comparing valid code alternatives with different trade-offs
   - Examples: "Imperative vs Functional", "Optimized for Speed vs Readability"
    {
      "type": "codeComparison",
      "title": "Prompt Example: Ineffective vs Effective",
      "leftCode": {
        "label": "Ineffective", // MANDATORY: LEFT = worse prompt (RED ✗)
        "language": "text",
        "code": "Could you help me write a function?\nThanks!"
      },
      "rightCode": {
        "label": "Effective", // MANDATORY: RIGHT = better prompt (GREEN ✓)
        "language": "text",
        "code": "Write a TypeScript function that validates email addresses.\nHandle edge cases:\n- Invalid @ symbols\n- Missing domain\n\nReturn { valid: boolean }"
      },
      "speakerNotes": { ... }
    },
    {
      "type": "codeComparison",
      "title": "Code Style Trade-offs: Imperative vs Functional",
      "neutral": true, // OPTIONAL: Use neutral=true for valid alternatives (PURPLE neutral styling)
      "leftCode": {
        "label": "Imperative Style", // Both styles valid - neutral styling
        "language": "javascript",
        "code": "const result = [];\nfor (let i = 0; i < items.length; i++) {\n  if (items[i] > 10) {\n    result.push(items[i] * 2);\n  }\n}\nreturn result;"
      },
      "rightCode": {
        "label": "Functional Style", // Both styles valid - neutral styling
        "language": "javascript",
        "code": "return items\n  .filter(x => x > 10)\n  .map(x => x * 2);"
      },
      "speakerNotes": { ... }
    },
    {
      "type": "marketingReality",
      "title": "Marketing vs Reality: What Actually Happens",
      "metaphor": {
        "label": "Marketing Speak",
        "content": ["Metaphorical statement 1", "Metaphorical statement 2"]
      },
      "reality": {
        "label": "Technical Reality",
        "content": ["Technical explanation 1", "Technical explanation 2"]
      },
      "speakerNotes": { ... }
    },
    {
      "type": "visual",
      "title": "Visual Component",
      "component": "${validComponents.join(" | ")}",
      "caption": "Description of what the visual shows",
      "speakerNotes": { ... }
    },
    {
      "type": "takeaway",
      "title": "Key Takeaways",
      "content": [
        "Takeaway 1",
        "Takeaway 2",
        "Takeaway 3"
      ],
      "speakerNotes": { ... }
    }
  ]
}
KEY TAKEAWAYS GENERATION (TWO-STEP PROCESS):
When creating the "takeaway" slide at the end of the presentation:
STEP 1: First, review the lesson and mentally list ALL significant takeaways
- Identify every important concept, pattern, or insight from the lesson
- Don't filter yet—just enumerate everything worth remembering
STEP 2: Then, condense to the 3-5 MOST critical takeaways
- Prioritize by impact and generality (what will matter most in production?)
- Combine related points into higher-level insights when possible
- Remove redundant or overly specific points
- **STRICT REQUIREMENT: Each takeaway MUST be 5 words or fewer**
- Use active verbs and eliminate filler words
- Examples:
  ✓ "Tests ground agent code quality" (5 words)
  ✓ "Context management improves agent reliability" (5 words)
  ✓ "Prompt versioning prevents regression bugs" (5 words)
  ✗ "Tests are critical for agent workflows in production" (8 words)
  ✗ "You should manage context to improve reliability" (7 words)
IMPORTANT: The final takeaway slide MUST have exactly 3-5 items, even if the source material lists more.
Quality over quantity—choose the most impactful insights.
WORD COUNT VALIDATION: This is strictly enforced. The build will fail if any takeaway exceeds 5 words.
CRITICAL REQUIREMENTS:
1. The output MUST be valid JSON - no preamble, no explanation, just the JSON object
2. Write the JSON directly to the file: ${outputPath}
3. Include 8-15 slides (no more, no less)
4. Every slide MUST have speakerNotes with all fields
5. Code examples must be actual code from the lesson, not pseudocode
6. Content arrays MUST have 3-5 items (except title slide) - THIS IS STRICTLY ENFORCED
7. PROMPT EXAMPLES: Use "code" or "codeComparison" slide types, NEVER bullet points
8. Learning objectives MUST be 5 words or fewer - THIS IS STRICTLY ENFORCED
9. Takeaway items MUST be 5 words or fewer - THIS IS STRICTLY ENFORCED
BEFORE YOU GENERATE - CHECKLIST:
□ Did I identify all prompt examples in the source?
□ Will I use "codeComparison" type for those slides (NOT "concept")?
□ Will I set language="text" for prompt code blocks?
□ Will I copy the EXACT prompt text without paraphrasing?
□ Did I avoid converting prompts to explanatory bullet points?
TECHNICAL CONTENT TITLE: ${fileName}
TECHNICAL CONTENT:
${content}
IMPORTANT: Write the complete presentation JSON directly to the file: ${outputPath}
The file should contain ONLY valid JSON - no preamble, no markdown, no explanation.
Just write the raw JSON to the file now.`;
}
/**
 * Call Claude Code CLI in headless mode to generate presentation
 *
 * Spawns `claude -p --model opus --allowedTools Edit Write`, pipes `prompt`
 * to the process's stdin, and expects the CLI itself to write the
 * presentation JSON to `outputPath` (the CLI is only granted the Edit and
 * Write tools). After the process exits, the file is read back, parsed,
 * minimally validated, and rewritten pretty-printed.
 *
 * @param {string} prompt - Full generation prompt sent to the CLI via stdin.
 * @param {string} outputPath - File path Claude is instructed to write JSON to.
 * @returns {Promise<object>} The parsed presentation object
 *   ({ metadata, slides }). Rejects when the CLI cannot be spawned, exits
 *   non-zero, does not create `outputPath`, writes invalid JSON, or the
 *   JSON lacks `metadata`/`slides`.
 */
async function generatePresentationWithClaude(prompt, outputPath) {
  // new Promise is the callback-adapter pattern: child_process.spawn is
  // event-based, so its events are bridged to resolve/reject here.
  return new Promise((resolve, reject) => {
    console.log(` 🤖 Calling Claude Code CLI (Opus 4.5)...`);
    // Ensure output directory exists
    mkdirSync(dirname(outputPath), { recursive: true });
    // Spawn claude process with headless mode
    const claude = spawn("claude", [
      "-p", // Headless mode
      "--model",
      "opus", // Use Opus 4.5
      "--allowedTools",
      "Edit",
      "Write", // Allow file editing and writing only
    ]);
    // Buffer both streams: stdout is used in error messages when the file
    // is missing, stderr when the CLI exits non-zero.
    let stdout = "";
    let stderr = "";
    claude.stdout.on("data", (data) => {
      stdout += data.toString();
    });
    claude.stderr.on("data", (data) => {
      stderr += data.toString();
    });
    claude.on("close", (code) => {
      if (code !== 0) {
        reject(new Error(`Claude CLI exited with code ${code}: ${stderr}`));
        return;
      }
      // Check if Claude created the file
      if (!existsSync(outputPath)) {
        reject(
          new Error(
            `Claude did not create the output file: ${outputPath}\n` +
            `Claude response: ${stdout.slice(0, 200)}`,
          ),
        );
        return;
      }
      console.log(` ✅ File created: ${outputPath}`);
      // Read and validate JSON
      let fileContent;
      try {
        fileContent = readFileSync(outputPath, "utf-8");
        const presentation = JSON.parse(fileContent);
        // Validate structure
        if (!presentation.metadata || !presentation.slides) {
          reject(
            new Error(
              "Invalid presentation structure - missing metadata or slides",
            ),
          );
          return;
        }
        // Slide count outside 8-15 is a warning only, not a rejection.
        if (presentation.slides.length < 8 || presentation.slides.length > 15) {
          console.log(
            ` ⚠️ Warning: ${presentation.slides.length} slides (expected 8-15)`,
          );
        }
        console.log(
          ` ✅ Valid presentation JSON (${presentation.slides.length} slides)`,
        );
        // Write the unmodified presentation to file for validation
        // Line breaking will happen after validation passes
        // (re-serializing normalizes whatever formatting Claude produced).
        writeFileSync(
          outputPath,
          JSON.stringify(presentation, null, 2),
          "utf-8",
        );
        resolve(presentation);
      } catch (parseError) {
        // fileContent may be undefined if readFileSync itself threw,
        // hence the optional chaining in the preview.
        reject(
          new Error(
            `Failed to parse JSON: ${parseError.message}\nContent preview: ${fileContent?.slice(0, 200)}`,
          ),
        );
        return;
      }
    });
    claude.on("error", (err) => {
      // Fires e.g. when the binary is not found (ENOENT); 'close' may not
      // follow a failed spawn, so reject here as well.
      reject(
        new Error(
          `Failed to spawn Claude CLI: ${err.message}. Is 'claude' installed and in PATH?`,
        ),
      );
    });
    // Send prompt to stdin
    // NOTE(review): if spawn fails, this write can raise EPIPE on some
    // platforms before the 'error' handler rejects — TODO confirm.
    claude.stdin.write(prompt);
    claude.stdin.end();
  });
}
// ============================================================================
// FILE DISCOVERY AND SELECTION
// ============================================================================
/**
 * Find all markdown files recursively
 *
 * Walks `dir` depth-first and collects every `.md` / `.mdx` file
 * (case-insensitive extension match), excluding any file whose name
 * contains "CLAUDE.md". Symlinks are followed, matching the original
 * statSync-based behavior.
 *
 * @param {string} dir - Root directory to search.
 * @returns {string[]} Lexicographically sorted list of matching file paths
 *   (each path is `dir`-relative-joined, i.e. prefixed with `dir`).
 */
function findMarkdownFiles(dir) {
  const files = [];
  function traverse(currentDir) {
    // withFileTypes avoids one statSync() syscall per directory entry.
    const entries = readdirSync(currentDir, { withFileTypes: true });
    for (const entry of entries) {
      const fullPath = join(currentDir, entry.name);
      // Dirent reports a symlink as neither file nor directory; resolve it
      // with statSync (which follows links) to preserve prior behavior.
      const isDirectory = entry.isSymbolicLink()
        ? statSync(fullPath).isDirectory()
        : entry.isDirectory();
      if (isDirectory) {
        traverse(fullPath);
      } else if (
        /\.(md|mdx)$/i.test(entry.name) &&
        !entry.name.includes("CLAUDE.md")
      ) {
        files.push(fullPath);
      }
    }
  }
  traverse(dir);
  return files.sort();
}
/**
 * Filter files based on config
 *
 * - config.file: keep only the exact file at baseDir/config.file.
 * - config.module: keep only files inside the baseDir/config.module
 *   directory (or the path itself).
 * - otherwise: return the input list unchanged.
 *
 * @param {string[]} files - Candidate file paths (as produced by discovery).
 * @param {{file?: string, module?: string}} config - Selection options.
 * @param {string} baseDir - Directory config paths are relative to.
 * @returns {string[]} The filtered list (same array when no filter applies).
 */
function filterFiles(files, config, baseDir) {
  if (config.file) {
    const targetFile = join(baseDir, config.file);
    return files.filter((f) => f === targetFile);
  }
  if (config.module) {
    const modulePath = join(baseDir, config.module);
    // Require a path-separator boundary after the prefix so that module
    // "01-intro" does not also match a sibling like "01-introduction".
    return files.filter((f) => {
      if (!f.startsWith(modulePath)) return false;
      const boundary = f[modulePath.length];
      return boundary === undefined || boundary === "/" || boundary === "\\";
    });
  }
  return files;
}
/**
 * Interactive file selection
 *
 * Prints a numbered list of files (shown relative to baseDir) and reads a
 * single selection from stdin. Resolves with a one-element array holding
 * the chosen path; rejects when the answer is not a number in range.
 *
 * @param {string[]} files - Paths to choose from.
 * @param {string} baseDir - Base used to render shorter relative paths.
 * @returns {Promise<string[]>} Single-element array with the selected file.
 */
async function promptSelectFile(files, baseDir) {
  return new Promise((resolve, reject) => {
    const rl = readline.createInterface({
      input: process.stdin,
      output: process.stdout,
    });
    console.log(`\n📚 Available files:\n`);
    for (const [index, file] of files.entries()) {
      const relativePath = relative(baseDir, file);
      console.log(` ${index + 1}. ${relativePath}`);
    }
    console.log("\n");
    const handleAnswer = (answer) => {
      // Close the interface first so stdin is released regardless of outcome.
      rl.close();
      const selection = Number.parseInt(answer, 10);
      const inRange =
        !Number.isNaN(selection) && selection >= 1 && selection <= files.length;
      if (!inRange) {
        reject(
          new Error(
            `Invalid selection: ${answer}. Please enter a number between 1 and ${files.length}.`,
          ),
        );
        return;
      }
      resolve([files[selection - 1]]);
    };
    rl.question(
      "Select a file by number (or press Ctrl+C to exit): ",
      handleAnswer,
    );
  });
}
// ============================================================================
// PROCESSING
// ============================================================================
/**
* Extract visual component names from parsed content
* @param {string} content - Parsed markdown content
* @returns {string[]} Array of component names
*/