<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="https://media.rss.com/style.xsl"?>
<rss xmlns:podcast="https://podcastindex.org/namespace/1.0" xmlns:itunes="http://www.itunes.com/dtds/podcast-1.0.dtd" xmlns:content="http://purl.org/rss/1.0/modules/content/" xmlns:psc="http://podlove.org/simple-chapters" xmlns:atom="http://www.w3.org/2005/Atom" xml:lang="en" version="2.0">
  <channel>
    <title><![CDATA[AI Change Desk]]></title>
    <link>https://www.michaelhbm.com/AiChangeDesk</link>
    <atom:link href="https://media.rss.com/aichangedesk/feed.xml" rel="self" type="application/rss+xml"/>
    <atom:link rel="hub" href="https://pubsubhubbub.appspot.com/"/>
    <description><![CDATA[<p>AI Change Desk helps leaders, managers, and operators make sense of AI changes and run adoption without hype. Every episode follows one format: context, impact, and action.</p>]]></description>
    <generator>RSS.com 2026.401.141116</generator>
    <lastBuildDate>Mon, 13 Apr 2026 21:21:43 GMT</lastBuildDate>
    <language>en</language>
    <copyright><![CDATA[MHBM 2026]]></copyright>
    <itunes:image href="https://media.rss.com/aichangedesk/20260211_040236_5a9a1006d341629b0ff318a93645ad91.png"/>
    <podcast:guid>dda312ba-d1b0-54c1-b0b6-4209725f6447</podcast:guid>
    <image>
      <url>https://media.rss.com/aichangedesk/20260211_040236_5a9a1006d341629b0ff318a93645ad91.png</url>
      <title>AI Change Desk</title>
      <link>https://www.michaelhbm.com/AiChangeDesk</link>
    </image>
    <podcast:locked>yes</podcast:locked>
    <podcast:license>MHBM 2026</podcast:license>
    <itunes:author>Michael Hanna-Butros Meyering</itunes:author>
    <itunes:owner>
      <itunes:name>Michael Hanna-Butros Meyering</itunes:name>
    </itunes:owner>
    <itunes:explicit>false</itunes:explicit>
    <itunes:type>episodic</itunes:type>
    <itunes:category text="Technology"/>
    <podcast:podroll>
      <podcast:remoteItem feedGuid="139f5927-a662-5e39-9b1f-17c3a9f624b2"/>
      <podcast:remoteItem feedGuid="03721808-598b-5216-ba90-b851a4a44776"/>
      <podcast:remoteItem feedGuid="b5670d54-4259-5a93-b4d0-a00aa3e1d541"/>
    </podcast:podroll>
    <podcast:medium>podcast</podcast:medium>
    <podcast:txt purpose="ai-content">true</podcast:txt>
    <item>
      <title><![CDATA[AI Brief | EP008: Model release control validation]]></title>
      <itunes:title><![CDATA[AI Brief | EP008: Model release control validation]]></itunes:title>
      <description><![CDATA[<p>Two current operator signals, translated into a plain-language weekly control block.</p><ul><li>OpenAI announced plans to acquire Promptfoo, pushing testing/eval workflows further into default AI release practice.</li><li>Anthropic launched The Anthropic Institute while NIST reinforced monitoring guidance context for deployed AI systems.</li><li>A 35-minute operator block you can run weekly with one owner and clear pause authority.</li></ul><ol><li>Require a tiny evidence packet for each AI behavior change (3 prompts + pass/fail + approver + rollback owner).</li><li>Publish a one-page operator memo in plain language (approved, restricted, paused, exception path, next review).</li><li>Run one mini pause drill each week: "output is wrong; who pauses in 10 minutes?"</li><li>Block scale-up on any workflow missing named approver or rollback owner.</li></ol><ul><li>00:00 Cold open + framing</li><li>00:55 Boundary note complete / theme intro in</li><li>01:10 Signal 1: OpenAI/Promptfoo and release evidence</li><li>03:58 Signal 2: Anthropic Institute + NIST monitoring pressure</li><li>06:05 Next-week 35-minute action block</li><li>07:25 Close + outro</li></ul><ul><li><a target="_blank" rel="noopener noreferrer nofollow" href="https://openai.com/index/openai-to-acquire-promptfoo/">https://openai.com/index/openai-to-acquire-promptfoo/</a></li><li><a target="_blank" rel="noopener noreferrer nofollow" href="https://www.promptfoo.dev/blog/promptfoo-joining-openai">https://www.promptfoo.dev/blog/promptfoo-joining-openai</a></li><li><a target="_blank" rel="noopener noreferrer nofollow" href="https://techcrunch.com/2026/03/09/openai-acquires-promptfoo-to-secure-its-ai-agents/">https://techcrunch.com/2026/03/09/openai-acquires-promptfoo-to-secure-its-ai-agents/</a></li><li><a target="_blank" rel="noopener noreferrer nofollow" href="https://www.anthropic.com/news/the-anthropic-institute">https://www.anthropic.com/news/the-anthropic-institute</a></li><li><a target="_blank" rel="noopener noreferrer nofollow" href="https://www.theverge.com/ai-artificial-intelligence/892478/anthropic-institute-think-tank-claude-pentagon-jack-clark">https://www.theverge.com/ai-artificial-intelligence/892478/anthropic-institute-think-tank-claude-pentagon-jack-clark</a></li><li><a target="_blank" rel="noopener noreferrer nofollow" href="https://www.nist.gov/news-events/news/2026/03/new-report-challenges-monitoring-deployed-ai-systems">https://www.nist.gov/news-events/news/2026/03/new-report-challenges-monitoring-deployed-ai-systems</a></li><li><a target="_blank" rel="noopener noreferrer nofollow" href="https://www.nist.gov/publications/challenges-monitoring-deployed-ai-systems-center-ai-standards-and-innovation">https://www.nist.gov/publications/challenges-monitoring-deployed-ai-systems-center-ai-standards-and-innovation</a></li></ul><ul><li>Episode page: <a target="_blank" rel="noopener noreferrer nofollow" href="https://www.michaelhbm.com/AIChangeDesk">https://www.michaelhbm.com/AIChangeDesk/</a></li><li>Apple Podcasts: <a target="_blank" rel="noopener noreferrer nofollow" href="https://podcasts.apple.com/us/podcast/ai-change-desk/id1876677295">https://podcasts.apple.com/us/podcast/ai-change-desk/id1876677295</a></li><li>Spotify: <a target="_blank" rel="noopener noreferrer nofollow" href="https://open.spotify.com/show/5X1sLLTeULqFCdt7aaisGD">https://open.spotify.com/show/5X1sLLTeULqFCdt7aaisGD</a></li></ul><p>AI-assisted tools were used in parts of research and production support. 
Final editorial judgment and release approval remained human-led. This is operational guidance, not legal advice.</p>]]></description>
      <link>https://rss.com/podcasts/aichangedesk/2619678</link>
      <enclosure url="https://content.rss.com/episodes/372404/2619678/aichangedesk/2026_03_12_03_01_32_82ffc104-a8c1-40c5-818c-2513854400f1.mp3" length="10033608" type="audio/mpeg"/>
      <guid isPermaLink="false">6ea744c0-dc81-4f2c-b2c5-8065f87bc064</guid>
      <itunes:duration>627</itunes:duration>
      <itunes:episodeType>full</itunes:episodeType>
      <itunes:season>1</itunes:season>
      <podcast:season>1</podcast:season>
      <itunes:episode>8</itunes:episode>
      <podcast:episode>8</podcast:episode>
      <itunes:explicit>false</itunes:explicit>
      <pubDate>Wed, 11 Mar 2026 14:06:51 GMT</pubDate>
      <itunes:image href="https://media.rss.com/aichangedesk/ep_cover_20260311_020350_87d4d6168616cfdb144935adad901edf.png"/>
      <podcast:chapters url="https://apollo.rss.com/chapters/2619678" type="application/json+chapters"/>
      <psc:chapters>
        <psc:chapter start="0" title="Cold open and framing"/>
        <psc:chapter start="55" title="Theme intro and boundary note close"/>
        <psc:chapter start="1:10" title="Signal 1: OpenAI + Promptfoo and release evidence"/>
        <psc:chapter start="3:58" title="Signal 2: Anthropic Institute + NIST monitoring"/>
        <psc:chapter start="6:05" title="Next-week 35-minute action block"/>
        <psc:chapter start="7:25" title="Close and outro"/>
      </psc:chapters>
    </item>
    <item>
      <title><![CDATA[AI Change Desk | EP007: Security Workflow Control Contract]]></title>
      <itunes:title><![CDATA[AI Change Desk | EP007: Security Workflow Control Contract]]></itunes:title>
      <description><![CDATA[<p>AI CHANGE DESK | EP007: SECURITY WORKFLOW CONTROL CONTRACT</p>
<p>If your AI can find a vulnerability, draft a patch, and open a PR, your biggest risk is no longer detection quality.</p>
<p>Your biggest risk is workflow ownership:</p>
<p>• who can analyze,</p>
<p>• who can approve,</p>
<p>• who can merge,</p>
<p>• who can pause,</p>
<p>• and who can attest the execution chain under pressure.</p>
<p>This episode translates four current signals into one operational playbook for next week.</p>
<p>WHAT CHANGED THIS WEEK</p>
<p>1. OpenAI launched Codex Security in research preview (2026-03-06).</p>
<p>2. Anthropic + Mozilla published concrete AI-assisted vulnerability workflow details (2026-03-06), including CVD and exploit-analysis references.</p>
<p>3. NIST published AI 800-4 on monitoring deployed AI systems (2026-03-06).</p>
<p>4. OpenAI launched GPT-5.4 and ChatGPT for Excel beta (2026-03-05), expanding business-user AI execution surfaces.</p>
<p>OPERATOR TRANSLATION</p>
<p>• Treat AI security pipelines as action-controlled workflows, not assistant features.</p>
<p>• Separate discovery throughput from remediation readiness.</p>
<p>• Move monitoring from dashboarding to a named ownership control.</p>
<p>• Add spreadsheet-AI usage controls where sensitive decisions or data handling occur.</p>
<p>MONDAY BLOCK (45 MINUTES, ONE OWNER)</p>
<p>• Minute 0-10: action matrix lock (Analyze, Draft fix, Open PR, Merge, Deploy) with allowed/checkpointed/restricted levels.</p>
<p>• Minute 10-20: credential and identity check (remove over-scoped inherited credentials).</p>
<p>• Minute 20-30: evidence contract (logs, retention, export path, access controls).</p>
<p>• Minute 30-40: disclosure + rollback ownership (name owners, define stop authority).</p>
<p>• Minute 40-45: operator memo (what changed, what is approved, what is restricted, who approves exceptions, next review date).</p>
<p>LINKS</p>
<p>• Episode page: https://www.michaelhbm.com/AIChangeDesk/episodes/ep007-security-workflow-control-contract.html</p>
<p>• YouTube channel: https://www.youtube.com/@AIChangeDesk</p>
<p>• RSS show: https://media.rss.com/aichangedesk/feed.xml</p>
<p>• Apple Podcasts: https://podcasts.apple.com/us/podcast/ai-change-desk/id1876677295</p>
<p>• Spotify: https://open.spotify.com/show/5X1sLLTeULqFCdt7aaisGD</p>
<p>SOURCES</p>
<p>• OpenAI (2026-03-06): https://openai.com/index/codex-security-now-in-research-preview/</p>
<p>• Anthropic + Mozilla collaboration post (2026-03-06): https://www.anthropic.com/news/mozilla-firefox-security</p>
<p>• Anthropic coordinated disclosure policy (2026-03-06): https://www.anthropic.com/coordinated-vulnerability-disclosure</p>
<p>• Anthropic exploit analysis (2026-03-06): https://red.anthropic.com/2026/exploit/</p>
<p>• Mozilla Firefox blog corroboration (2026-03-06): https://blog.mozilla.org/en/firefox/hardening-firefox-anthropic-red-team/</p>
<p>• NIST AI 800-4 publication page (2026-03-06): https://www.nist.gov/publications/challenges-monitoring-deployed-ai-systems-center-ai-standards-and-innovation</p>
<p>• OpenAI GPT-5.4 launch (2026-03-05): https://openai.com/index/introducing-gpt-5-4/</p>
<p>• OpenAI ChatGPT for Excel (2026-03-05): https://openai.com/index/chatgpt-for-excel/</p>
<p>DISCLOSURE</p>
<p>AI-assisted tools were used in parts of the research and production workflow. Final editorial judgment, risk posture, and release approval stayed human-led.</p>
<p>This is operational guidance, not legal advice. These are my opinions and are not representative of any organization.</p>]]></description>
      <link>https://rss.com/podcasts/aichangedesk/2612489</link>
      <enclosure url="https://content.rss.com/episodes/372404/2612489/aichangedesk/2026_03_09_15_14_36_66426f2b-c87d-4a2a-8baa-4da7373ff0c8.mp3" length="24081307" type="audio/mpeg"/>
      <guid isPermaLink="false">5e92ee5e-ff7a-4bb6-80cd-a8a46f1dac80</guid>
      <itunes:duration>1505</itunes:duration>
      <itunes:episodeType>full</itunes:episodeType>
      <itunes:season>1</itunes:season>
      <podcast:season>1</podcast:season>
      <itunes:episode>7</itunes:episode>
      <podcast:episode>7</podcast:episode>
      <itunes:explicit>false</itunes:explicit>
      <pubDate>Mon, 09 Mar 2026 15:05:42 GMT</pubDate>
      <itunes:image href="https://media.rss.com/aichangedesk/ep_cover_20260309_050301_f1775348bbceab01e8ef68af27f2bbbd.png"/>
      <podcast:transcript url="https://transcripts.rss.com/372404/2612489/transcript" type="text/plain"/>
      <podcast:chapters url="https://apollo.rss.com/chapters/2612489" type="application/json+chapters"/>
      <psc:chapters>
        <psc:chapter start="0" title="Cold open + show contract"/>
        <psc:chapter start="2:10" title="Why this episode now (EP005 + EP006 bridge)"/>
        <psc:chapter start="4:20" title="Story 1: Codex Security and workflow ownership"/>
        <psc:chapter start="8:20" title="Story 2: Anthropic + Mozilla CVD workflow lanes"/>
        <psc:chapter start="12:45" title="Story 3: NIST AI 800-4 monitoring control pack"/>
        <psc:chapter start="16:40" title="Story 4: GPT-5.4 + Excel execution-surface shift"/>
        <psc:chapter start="19:50" title="Failure scenario replay with control layering"/>
        <psc:chapter start="22:05" title="Monday action block (45 minutes, one owner)"/>
        <psc:chapter start="23:40" title="Resistance handling + scorecard"/>
        <psc:chapter start="24:35" title="30-60-90 path, close, and outro"/>
      </psc:chapters>
    </item>
    <item>
      <title><![CDATA[Episode 06: AI Brief: GPT-5.3 and continuity controls]]></title>
      <itunes:title><![CDATA[Episode 06: AI Brief: GPT-5.3 and continuity controls]]></itunes:title>
      <description><![CDATA[
<p>Two current operator signals, translated into one concrete next-week action block.</p>

<ul>
<li>OpenAI released GPT-5.3 Instant and published system-card details.</li>
<li>Vendor continuity pressure stayed elevated through Anthropic policy-dispute and blacklist-risk signals.</li>
<li>A 30-minute Monday control loop to keep model release and fallback controls current.</li>
</ul>

<ol>
<li>Treat model releases as workflow change events, not just product updates.</li>
<li>Run a 3-prompt regression pack before broad rollout after model changes (sketched below).</li>
<li>Confirm rollback owner + stop authority for critical AI workflows.</li>
<li>Define one tested fallback path for top three AI-enabled workflows.</li>
<li>Send a plain-language operator memo each Monday (approved/restricted/escalation).</li>
</ol>
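
<p>For item 2, here is one minimal sketch of what a 3-prompt regression pack could look like in Python. It assumes the OpenAI Python SDK with an API key in the environment; the model name and the prompt/expected-substring pairs are placeholders rather than the show's own pack, so swap in three prompts your workflow actually depends on.</p>

<pre><code># Minimal sketch of a 3-prompt regression pack to run before broad rollout after a model change.
# Assumptions: the OpenAI Python SDK is installed and OPENAI_API_KEY is set in the environment;
# the model name and the prompt/expected pairs are placeholders for your own workflow examples.
from openai import OpenAI

MODEL = "gpt-4o-mini"  # placeholder: use whichever model your workflow is approved to run

# Each entry: a prompt the workflow depends on, plus a substring the answer must contain.
REGRESSION_PACK = [
    ("Summarize: 'The model update ships Friday; the rollback owner is on call.'", "rollback"),
    ("Extract the total from: 'Invoice total due: $1,250.00'", "1,250"),
    ("Reply with the single word APPROVED if this sentence contains no numbers: 'All clear.'", "APPROVED"),
]

def run_pack():
    client = OpenAI()
    all_passed = True
    for prompt, must_contain in REGRESSION_PACK:
        reply = client.chat.completions.create(
            model=MODEL,
            messages=[{"role": "user", "content": prompt}],
        ).choices[0].message.content or ""
        passed = must_contain.lower() in reply.lower()
        all_passed = all_passed and passed
        print(f"{'PASS' if passed else 'FAIL'}: {prompt[:40]}")
    return all_passed

if __name__ == "__main__":
    # A failing pack blocks rollout until the named approver signs off.
    raise SystemExit(0 if run_pack() else 1)
</code></pre>

<p>Attach the pass/fail output to the rollout decision along with the named rollback owner from item 3.</p>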

<ul>
<li>00:00 Cold open + framing</li>
<li>00:39 Theme intro and boundary note close</li>
<li>00:54 Signal 1: GPT-5.3 Instant and release governance</li>
<li>02:25 Signal 2: vendor continuity pressure</li>
<li>03:45 Monday action block (30-minute control loop)</li>
<li>04:31 Close + outro</li>
</ul>

<ul>
<li><a href="https://openai.com/index/gpt-5-3-instant/">https://openai.com/index/gpt-5-3-instant/</a></li>
<li><a href="https://openai.com/index/gpt-5-3-instant-system-card/">https://openai.com/index/gpt-5-3-instant-system-card/</a></li>
<li><a href="https://www.anthropic.com/news/statement-comments-secretary-war">https://www.anthropic.com/news/statement-comments-secretary-war</a></li>
<li><a href="https://techcrunch.com/2026/03/02/tech-workers-urge-dod-congress-to-withdraw-anthropic-label-as-a-supply-chain-risk/">https://techcrunch.com/2026/03/02/tech-workers-urge-dod-congress-to-withdraw-anthropic-label-as-a-supply-chain-risk/</a></li>
<li><a href="https://techcrunch.com/2026/02/27/anthropic-vs-the-pentagon-whats-actually-at-stake/">https://techcrunch.com/2026/02/27/anthropic-vs-the-pentagon-whats-actually-at-stake/</a></li>
</ul>

<ul>
<li>Episode page: <a href="https://www.michaelhbm.com/AIChangeDesk/episodes/brief-2026-03-04-ai-brief.html">https://www.michaelhbm.com/AIChangeDesk/episodes/brief-2026-03-04-ai-brief.html</a></li>
<li>Apple Podcasts: <a href="https://podcasts.apple.com/us/podcast/ai-change-desk/id1876677295">https://podcasts.apple.com/us/podcast/ai-change-desk/id1876677295</a></li>
<li>Spotify: <a href="https://open.spotify.com/show/5X1sLLTeULqFCdt7aaisGD">https://open.spotify.com/show/5X1sLLTeULqFCdt7aaisGD</a></li>
</ul>

<p>AI-assisted tools were used in parts of research and production support. Final editorial judgment and release approval remained human-led. This is operational guidance, not legal advice.</p>]]></description>
      <link>https://rss.com/podcasts/aichangedesk/2599487</link>
      <enclosure url="https://content.rss.com/episodes/372404/2599487/aichangedesk/2026_03_04_05_23_00_c5449f9a-e9f6-4d87-b70c-aeaa77ff0082.mp3" length="4496447" type="audio/mpeg"/>
      <guid isPermaLink="false">547331ab-1c75-4015-ab88-d4f1241f1f65</guid>
      <itunes:duration>280</itunes:duration>
      <itunes:episodeType>full</itunes:episodeType>
      <itunes:season>1</itunes:season>
      <podcast:season>1</podcast:season>
      <itunes:episode>6</itunes:episode>
      <podcast:episode>6</podcast:episode>
      <itunes:explicit>false</itunes:explicit>
      <pubDate>Wed, 04 Mar 2026 13:00:00 GMT</pubDate>
      <podcast:txt purpose="ai-content">true</podcast:txt>
      <itunes:image href="https://media.rss.com/aichangedesk/ep_cover_20260304_060357_744947e4b600c17f7eb37d2ae40d7413.png"/>
      <podcast:transcript url="https://transcripts.rss.com/372404/2599487/transcript" type="text/plain"/>
      <podcast:chapters url="https://apollo.rss.com/chapters/2599487" type="application/json+chapters"/>
      <psc:chapters>
        <psc:chapter start="0" title="Cold open and framing"/>
        <psc:chapter start="39" title="Theme intro and boundary note close"/>
        <psc:chapter start="54" title="Signal 1: GPT-5.3 Instant and release governance"/>
        <psc:chapter start="2:25" title="Signal 2: vendor continuity pressure"/>
        <psc:chapter start="3:45" title="Monday action block: 30-minute control loop"/>
        <psc:chapter start="4:31" title="Close and outro"/>
      </psc:chapters>
    </item>
    <item>
      <title><![CDATA[AI Change Desk | EP005: Run Agents Without Losing Control]]></title>
      <itunes:title><![CDATA[AI Change Desk | EP005: Run Agents Without Losing Control]]></itunes:title>
      <description><![CDATA[<p>AI CHANGE DESK | EP005: RUN AGENTS WITHOUT LOSING CONTROL</p>
<p>If AI systems can execute actions in your environment, governance has to move from policy language to access control execution.</p>
<p>This episode translates current signals into practical controls for operators: action-tier permissions, scoped credentials, human approval thresholds, deployment tier decisions, and a weekly control desk teams can run quickly.</p>
<p>WHAT YOU WILL GET</p>
<p>• A practical access-control framework for agent-enabled workflows.</p>
<p>• Action-tier classification you can apply this week (read, draft, update-internal, external-send, system-admin).</p>
<p>• A deployment control checklist for connected/hybrid/disconnected environments.</p>
<p>• A standards-aligned procurement starter (identity, interoperability, proportional controls).</p>
<p>• A Monday control desk + metrics scorecard + 30-60-90 implementation sequence.</p>
<p>TIMESTAMPS</p>
<p>• 00:00 Cold open: access control is the operating risk</p>
<p>• 00:50 Intro, disclosure, and show contract</p>
<p>• 02:15 Why EP005 now (bridge from EP003 + EP004)</p>
<p>• 04:10 Story 1: Anthropic + Vercept and action-tier controls</p>
<p>• 08:30 Story 2: OpenAI elevated-risk controls and malicious-use patterns</p>
<p>• 12:10 Story 3: Sovereign deployment and architecture obligations</p>
<p>• 15:35 Story 4: NIST standards + proportional controls</p>
<p>• 18:55 Scenario walkthrough + risk check</p>
<p>• 21:40 Monday Access Control Desk</p>
<p>• 24:15 Metrics, 30-60-90 plan, FAQ, and control drills</p>
<p>• 25:04 Close + outro</p>
<p>MONDAY ACTIONS (RUN THIS NEXT WEEK)</p>
<p>1. Classify top five AI workflows by action tier.</p>
<p>2. Scope credentials for the highest-impact workflow.</p>
<p>3. Name stop-authority owner for each critical workflow.</p>
<p>4. Set approval thresholds for external-send and system-admin actions.</p>
<p>5. Publish one-page operator update with approved/restricted actions and escalation path.</p>
<p>SOURCES</p>
<p>• https://www.anthropic.com/news/anthropic-acquires-vercept</p>
<p>• https://techcrunch.com/2026/02/25/anthropic-acquires-vercept-to-expand-computer-use-agents/</p>
<p>• https://openai.com/index/introducing-lockdown-mode-and-elevated-risk-labels-in-chatgpt-safety/</p>
<p>• https://openai.com/index/disrupting-malicious-ai-uses/</p>
<p>• https://www.microsoft.com/en-us/microsoft-cloud/blog/2026/02/24/announcing-sovereign-cloud-ai-updates/</p>
<p>• https://www.microsoft.com/en-us/industry/blog/government/2026/02/24/accelerating-government-mission-with-microsoft-sovereign-cloud/</p>
<p>• https://www.nist.gov/caisi/ai-agent-standards-initiative</p>
<p>• https://www.nist.gov/artificial-intelligence/ai-agent-interoperability-and-efficiency-standards-request-information</p>
<p>• https://digital-strategy.ec.europa.eu/en/library/eu-ai-office-and-jrc-publish-report-proportionality-ai</p>
<p>• https://ai-watch.ec.europa.eu/publications/eu-ai-office-and-jrc-report-proportionality-trustworthy-ai</p>
<p>LISTEN</p>
<p>• YouTube: https://www.youtube.com/@AIChangeDesk</p>
<p>• Spotify: https://open.spotify.com/show/5X1sLLTeULqFCdt7aaisGD</p>
<p>• Apple Podcasts: https://podcasts.apple.com/us/podcast/ai-change-desk/id1876677295</p>
<p>LISTENER QUESTION</p>
<p>Where is your organization most exposed right now: permission scope, approval thresholds, or action logging?</p>
<p>DISCLOSURE</p>
<p>AI-assisted tools were used in parts of drafting, synthesis, and production support. Final editorial judgment and release approval remained human-led.</p>]]></description>
      <link>https://rss.com/podcasts/aichangedesk/2594143</link>
      <enclosure url="https://content.rss.com/episodes/372404/2594143/aichangedesk/2026_03_02_17_43_42_436bc007-0d7d-4297-9707-b7eae154226b.mp3" length="24222031" type="audio/mpeg"/>
      <guid isPermaLink="false">3285da78-cb39-422a-9cca-b06698de91f5</guid>
      <itunes:duration>1513</itunes:duration>
      <itunes:episodeType>full</itunes:episodeType>
      <itunes:season>1</itunes:season>
      <podcast:season>1</podcast:season>
      <itunes:episode>5</itunes:episode>
      <podcast:episode>5</podcast:episode>
      <itunes:explicit>false</itunes:explicit>
      <pubDate>Mon, 02 Mar 2026 17:43:41 GMT</pubDate>
      <itunes:image href="https://media.rss.com/aichangedesk/ep_cover_20260302_060334_2811949132e8da26a0ec671025393cd8.png"/>
      <podcast:transcript url="https://transcripts.rss.com/372404/2594143/transcript" type="text/plain"/>
      <podcast:chapters url="https://apollo.rss.com/chapters/2594143" type="application/json+chapters"/>
      <psc:chapters>
        <psc:chapter start="0" title="Cold open: access control is the operating risk"/>
        <psc:chapter start="50" title="Intro, disclosure, and episode contract"/>
        <psc:chapter start="2:15" title="Why EP005 now: from governance loop to execution control"/>
        <psc:chapter start="4:10" title="Story 1: Anthropic + Vercept and action-tier controls"/>
        <psc:chapter start="8:30" title="Story 2: OpenAI risk labels and malicious-use signals"/>
        <psc:chapter start="12:10" title="Story 3: Sovereign deployment and architecture obligations"/>
        <psc:chapter start="15:35" title="Story 4: NIST standards + proportional controls"/>
        <psc:chapter start="18:55" title="Scenario walkthrough + risk and reality check"/>
        <psc:chapter start="21:40" title="Monday 45-minute access control desk"/>
        <psc:chapter start="24:15" title="Metrics, 30-60-90 plan, FAQ, and drill recap"/>
        <psc:chapter start="25:04" title="Close and outro"/>
      </psc:chapters>
    </item>
    <item>
      <title><![CDATA[AI Brief: what changed this week]]></title>
      <itunes:title><![CDATA[AI Brief: what changed this week]]></itunes:title>
      <description><![CDATA[
<p>Two operator-relevant signals from this week, translated into concrete controls teams can execute immediately.</p>

<ul>
<li>Distillation attacks moved from model-lab concern to enterprise operations risk.</li>
<li>NIST's AI Agent Standards Initiative reinforced near-term interoperability and accountability expectations.</li>
<li>A 25-minute weekly governance desk loop you can run every Monday.</li>
</ul>

<ol>
<li>Treat provider security bulletins as workflow events, not background reading.</li>
<li>Classify AI usage into open-assist, controlled-assist, and restricted classes.</li>
<li>Add interoperability and control portability checks to AI procurement intake.</li>
<li>Require a human accountability map for every agent-like workflow.</li>
<li>Ship a one-page operator update: what changed, what to do, what not to do.</li>
</ol>

<ul>
<li>00:00 Cold open: policy that cannot survive Monday is policy theater</li>
<li>01:00 Theme intro</li>
<li>01:16 Framing and disclosure</li>
<li>01:57 Signal 1: distillation attacks and model-control hardening</li>
<li>04:30 Signal 2: standards momentum as procurement and controls signal</li>
<li>06:57 Monday checklist: 25-minute governance desk</li>
<li>08:06 Close</li>
<li>08:18 Final reminder: one owner, one decision, one due date</li>
<li>08:27 Brand outro</li>
</ul>

<ul>
<li><a href="https://www.anthropic.com/news/detecting-and-preventing-distillation-attacks">https://www.anthropic.com/news/detecting-and-preventing-distillation-attacks</a></li>
<li><a href="https://www.businessinsider.com/anthropic-deepseek-distillation-minimax-moonshot-ai-2026-2">https://www.businessinsider.com/anthropic-deepseek-distillation-minimax-moonshot-ai-2026-2</a></li>
<li><a href="https://www.nist.gov/caisi/ai-agent-standards-initiative">https://www.nist.gov/caisi/ai-agent-standards-initiative</a></li>
<li><a href="https://www.ansi.org/standards-news/all-news/2-18-26-nist-launches-ai-agent-standards-initiative">https://www.ansi.org/standards-news/all-news/2-18-26-nist-launches-ai-agent-standards-initiative</a></li>
<li><a href="https://www.nist.gov/news-events/news/2026/02/nist-seeks-public-input-advance-ai-agent-interoperability-and-efficiency">https://www.nist.gov/news-events/news/2026/02/nist-seeks-public-input-advance-ai-agent-interoperability-and-efficiency</a></li>
</ul>

<ul>
<li>Website episode page: <a href="https://www.michaelhbm.com/AIChangeDesk/episodes/brief-2026-02-25-ai-brief.html">https://www.michaelhbm.com/AIChangeDesk/episodes/brief-2026-02-25-ai-brief.html</a></li>
<li>Apple Podcasts: <a href="https://podcasts.apple.com/us/podcast/ai-change-desk/id1876677295">https://podcasts.apple.com/us/podcast/ai-change-desk/id1876677295</a></li>
<li>Spotify: <a href="https://open.spotify.com/show/5X1sLLTeULqFCdt7aaisGD">https://open.spotify.com/show/5X1sLLTeULqFCdt7aaisGD</a></li>
</ul>

<p>AI-assisted tools were used in research and production support. Final editorial judgment and release approval remained human-led.</p>]]></description>
      <link>https://rss.com/podcasts/aichangedesk/2576031</link>
      <enclosure url="https://content.rss.com/episodes/372404/2576031/aichangedesk/2026_02_24_21_17_19_20ec3f75-00b6-4326-8e5e-c5bdabb0a280.mp3" length="8163621" type="audio/mpeg"/>
      <guid isPermaLink="false">59c9c6e3-ba51-4741-8f02-00a0ba7faa86</guid>
      <itunes:duration>510</itunes:duration>
      <itunes:episodeType>full</itunes:episodeType>
      <itunes:season>1</itunes:season>
      <podcast:season>1</podcast:season>
      <itunes:episode>4</itunes:episode>
      <podcast:episode>4</podcast:episode>
      <itunes:explicit>false</itunes:explicit>
      <pubDate>Wed, 25 Feb 2026 13:00:00 GMT</pubDate>
      <itunes:image href="https://media.rss.com/aichangedesk/ep_cover_20260225_010214_4f4462b06c8d3ce4ef5ee6f2aa9ef9c3.png"/>
      <podcast:transcript url="https://transcripts.rss.com/372404/2576031/transcript" type="text/plain"/>
      <podcast:chapters url="https://apollo.rss.com/chapters/2576031" type="application/json+chapters"/>
      <psc:chapters>
        <psc:chapter start="0" title="Cold open: policy that cannot survive Monday is policy theater"/>
        <psc:chapter start="1:00" title="Theme intro"/>
        <psc:chapter start="1:16" title="Framing and disclosure"/>
        <psc:chapter start="1:57" title="Signal 1: distillation attacks and model control hardening"/>
        <psc:chapter start="4:30" title="Signal 2: standards momentum as procurement signal"/>
        <psc:chapter start="6:57" title="Monday action checklist: 25-minute governance desk"/>
        <psc:chapter start="8:06" title="Close"/>
        <psc:chapter start="8:18" title="Final reminder: one owner, one decision, one due date"/>
        <psc:chapter start="8:27" title="Brand outro"/>
      </psc:chapters>
    </item>
    <item>
      <title><![CDATA[AI governance implementation for operators: turning policy into weekly execution]]></title>
      <itunes:title><![CDATA[AI governance implementation for operators: turning policy into weekly execution]]></itunes:title>
      <description><![CDATA[<p>EP003: AI GOVERNANCE IMPLEMENTATION FOR OPERATORS</p>
<p>AI governance breaks when it lives as a policy document and not as a weekly operating loop.</p>
<p>In this main episode, we use current market signals (model updates, AI security tooling, regional deployment strategy, and standards activity) to show how leaders and operators can run governance as execution instead of theory.</p>
<p>WHAT YOU WILL GET</p>
<p>• A practical model-change governance workflow you can run every week.</p>
<p>• Security workflow controls for AI-assisted code review.</p>
<p>• Procurement and data-governance actions triggered by regional/partner deployment signals.</p>
<p>• A reusable weekly AI Governance Desk format with owner, controls, and communication outputs.</p>
<p>• A late-update block on alignment-research funding and regulated-industry deployment signals.</p>
<p>TIMESTAMPS</p>
<p>• 00:00 Cold open: governance is a workflow, not a PDF</p>
<p>• 00:59 Intro music + disclosure</p>
<p>• 01:20 Why this episode now (EP001/EP002 bridge)</p>
<p>• 03:20 Story 1: Claude Sonnet 4.6 and model-change governance</p>
<p>• 07:50 Story 2: Claude Code Security and human-in-the-loop controls</p>
<p>• 12:20 Story 3: OpenAI for India + Tata and procurement reality</p>
<p>• 16:00 Story 4: NIST AI agent interoperability signal</p>
<p>• 18:10 Late updates: alignment funding + regulated-industry collaboration</p>
<p>• 19:00 Weekly AI Governance Desk (25-minute operating loop)</p>
<p>• 22:05 Postscript: chat-code controls + workflow-class policy mapping</p>
<p>• 23:25 Monday morning actions</p>
<p>• 24:25 Outro + listener question</p>
<p>MONDAY MORNING ACTIONS</p>
<p>1. Name one owner for weekly AI governance desk operations.</p>
<p>2. Run a model-change regression check on your top workflows.</p>
<p>3. Require human approval for AI-generated security patches/findings.</p>
<p>4. Update procurement clauses (data handling, change notifications, sub-processors).</p>
<p>5. Publish a one-page internal update: what changed, what to do, what not to do.</p>
<p>SOURCES</p>
<p>• https://www.anthropic.com/news/claude-sonnet-4-6</p>
<p>• https://docs.anthropic.com/en/release-notes/api#feb-17th-2026</p>
<p>• https://www.anthropic.com/news/claude-code-security</p>
<p>• https://docs.anthropic.com/en/docs/claude-code/security</p>
<p>• https://openai.com/index/openai-for-india/</p>
<p>• https://www.tata.com/newsroom/openai-and-tata-group-announce-strategic-collaboration</p>
<p>• https://www.nist.gov/news-events/news/2026/02/nist-seeks-public-input-advance-ai-agent-interoperability-and-efficiency</p>
<p>• https://www.federalregister.gov/documents/2026/02/20/2026-02979/ai-agent-interoperability-and-efficiency-standards-request-for-information</p>
<p>• https://openai.com/index/advancing-independent-research-ai-alignment/</p>
<p>• https://alignmentproject.aisi.gov.uk/</p>
<p>• https://www.anthropic.com/news/anthropic-infosys</p>
<p>• https://www.infosys.com/newsroom/press-releases/2026/advanced-enterprise-ai-solutions-industries.html</p>
<p>LISTEN</p>
<p>• Spotify: https://open.spotify.com/show/5X1sLLTeULqFCdt7aaisGD</p>
<p>• Apple Podcasts: https://podcasts.apple.com/us/podcast/ai-change-desk/id1876677295</p>
<p>DISCLOSURE</p>
<p>AI-assisted tools were used in parts of drafting, synthesis, and production support. Final editorial judgment and release approval remained with the host.</p>]]></description>
      <link>https://rss.com/podcasts/aichangedesk/2572418</link>
      <enclosure url="https://content.rss.com/episodes/372404/2572418/aichangedesk/2026_02_23_16_12_03_24fccf75-c2c2-4741-8a7a-0726a03ce301.mp3" length="24005946" type="audio/mpeg"/>
      <guid isPermaLink="false">10d52699-bd04-40ad-9e7e-e1302459ac7c</guid>
      <itunes:duration>1500</itunes:duration>
      <itunes:episodeType>full</itunes:episodeType>
      <itunes:season>1</itunes:season>
      <podcast:season>1</podcast:season>
      <itunes:episode>3</itunes:episode>
      <podcast:episode>3</podcast:episode>
      <itunes:explicit>false</itunes:explicit>
      <pubDate>Mon, 23 Feb 2026 16:12:02 GMT</pubDate>
      <itunes:image href="https://media.rss.com/aichangedesk/ep_cover_20260223_040202_66d74ad2f57490ef989e3d670cbdfd14.png"/>
      <podcast:transcript url="https://transcripts.rss.com/372404/2572418/transcript" type="text/plain"/>
      <podcast:chapters url="https://apollo.rss.com/chapters/2572418" type="application/json+chapters"/>
      <psc:chapters>
        <psc:chapter start="0" title="Cold open: governance is a workflow"/>
        <psc:chapter start="59" title="Intro and disclosure"/>
        <psc:chapter start="1:20" title="Why this episode now"/>
        <psc:chapter start="3:20" title="Story 1: Sonnet 4.6 and model-change controls"/>
        <psc:chapter start="7:50" title="Story 2: Claude Code Security governance"/>
        <psc:chapter start="12:20" title="Story 3: OpenAI India and procurement implications"/>
        <psc:chapter start="16:00" title="Story 4: NIST AI agent interoperability signal"/>
        <psc:chapter start="18:10" title="Late updates: alignment funding and regulated-industry signals"/>
        <psc:chapter start="19:00" title="Weekly AI Governance Desk (implementation loop)"/>
        <psc:chapter start="22:05" title="Postscript: chat-code controls and workflow policy classes"/>
        <psc:chapter start="23:25" title="Monday morning action list"/>
        <psc:chapter start="24:25" title="Outro and listener prompt"/>
      </psc:chapters>
    </item>
    <item>
      <title><![CDATA[AI policy basics for operators: what this week changed]]></title>
      <itunes:title><![CDATA[AI policy basics for operators: what this week changed]]></itunes:title>
      <description><![CDATA[<p>EP002: AI policy basics for operators.</p>
<p>This episode translates AI policy concepts into practical operating decisions for leaders, managers, and delivery teams.</p>

<ul>
<li>Episode: 002</li>
<li>Title: AI policy basics for operators</li>
<li>Runtime: 10m 30s</li>
<li>Host: Michael Hanna-Butros Meyering</li>
</ul>

<p>AI policy works only when it is written as operational guidance people can apply in daily workflows.</p>

<ul>
<li>00:00 Why AI policy fails in real teams</li>
<li>01:20 Story 1: Claude Sonnet 4.6 and model-change governance</li>
<li>04:40 Story 2: AI infrastructure cost signals and procurement controls</li>
<li>07:40 Action block: policy + change management implementation</li>
<li>09:40 Monday-morning actions + outro</li>
</ul>

<ul>
<li>Anthropic launched Claude Sonnet 4.6 (February 17, 2026), which reinforces the need for model-upgrade controls and evaluation gates in internal policy.</li>
<li>Anthropic announced it will cover electricity price increases tied to data-center growth (February 17, 2026), making infrastructure impact a practical procurement and governance issue.</li>
</ul>

<ul>
<li>Scope: which AI use cases are allowed, restricted, or prohibited.</li>
<li>Data: which data classes may be used with which tools.</li>
<li>Controls: review, logging, exception handling, and escalation.</li>
<li>Accountability: who owns policy updates and incident response.</li>
</ul>

<ul>
<li>Add a model-change trigger section to your AI policy (when re-evaluation is mandatory).</li>
<li>Add three infrastructure-risk questions to AI vendor intake.</li>
<li>Run one manager briefing with a clear script for allowed/restricted use.</li>
<li>Audit one active AI workflow for drift between policy and real usage.</li>
</ul>

<ul>
<li>Anthropic, “Announcing Claude Sonnet 4.6”: <a href="https://www.anthropic.com/news/claude-sonnet-4-6">https://www.anthropic.com/news/claude-sonnet-4-6</a></li>
<li>TechCrunch coverage, “Anthropic releases Claude Sonnet 4.6”: <a href="https://techcrunch.com/2026/02/17/anthropic-releases-claude-sonnet-4-6/">https://techcrunch.com/2026/02/17/anthropic-releases-claude-sonnet-4-6/</a></li>
<li>Anthropic, “Covering electricity price increases from AI data centers”: <a href="https://www.anthropic.com/news/covering-electricity-price-increases">https://www.anthropic.com/news/covering-electricity-price-increases</a></li>
<li>Reuters coverage (via Investing.com): <a href="https://www.investing.com/news/stock-market-news/anthropic-to-cover-electricity-price-increases-in-areas-where-it-builds-data-centers-3894580">https://www.investing.com/news/stock-market-news/anthropic-to-cover-electricity-price-increases-in-areas-where-it-builds-data-centers-3894580</a></li>
<li>NIST AI Risk Management Framework: <a href="https://www.nist.gov/itl/ai-risk-management-framework">https://www.nist.gov/itl/ai-risk-management-framework</a></li>
<li>NIST Generative AI Profile: <a href="https://www.nist.gov/publications/artificial-intelligence-risk-management-framework-generative-artificial-intelligence">https://www.nist.gov/publications/artificial-intelligence-risk-management-framework-generative-artificial-intelligence</a></li>
<li>OECD AI Principles: <a href="https://oecd.ai/en/ai-principles">https://oecd.ai/en/ai-principles</a></li>
<li>ISO/IEC 42001 overview: <a href="https://www.iso.org/standard/81230.html">https://www.iso.org/standard/81230.html</a></li>
</ul>

<p>This episode uses AI-assisted production tools (voice rendering, editing support, and publishing automation). Final editorial and risk decisions are human-led.</p>]]></description>
      <link>https://rss.com/podcasts/aichangedesk/2559367</link>
      <enclosure url="https://content.rss.com/episodes/372404/2559367/aichangedesk/2026_02_19_00_28_00_f99a5d8f-4a72-4a93-85e7-4aaca2900ff1.mp3" length="10080713" type="audio/mpeg"/>
      <guid isPermaLink="false">5831081f-5cc7-472b-9183-2372f7bd9e27</guid>
      <itunes:duration>629</itunes:duration>
      <itunes:episodeType>full</itunes:episodeType>
      <itunes:season>1</itunes:season>
      <podcast:season>1</podcast:season>
      <itunes:episode>2</itunes:episode>
      <podcast:episode>2</podcast:episode>
      <itunes:explicit>false</itunes:explicit>
      <pubDate>Thu, 19 Feb 2026 00:01:54 GMT</pubDate>
      <itunes:image href="https://media.rss.com/aichangedesk/ep_cover_20260219_120211_895cdc422a14ca52a2a446f89da648ff.png"/>
      <podcast:transcript url="https://transcripts.rss.com/372404/2559367/transcript" type="text/plain"/>
      <podcast:chapters url="https://apollo.rss.com/chapters/2559367" type="application/json+chapters"/>
      <psc:chapters>
        <psc:chapter start="0" title="Context: why policy fails in real teams"/>
        <psc:chapter start="1:20" title="Story 1: Claude Sonnet 4.6 and model-change governance"/>
        <psc:chapter start="4:40" title="Story 2: infrastructure cost signals and procurement controls"/>
        <psc:chapter start="7:40" title="Action block: policy + change management implementation"/>
        <psc:chapter start="9:40" title="Monday-morning actions + outro"/>
      </psc:chapters>
    </item>
    <item>
      <title><![CDATA[Welcome to AI Change Desk]]></title>
      <itunes:title><![CDATA[Welcome to AI Change Desk]]></itunes:title>
      <description><![CDATA[<p>Welcome to episode one of AI Change Desk.</p>
<p>This launch episode introduces the mission of the show and a practical framework you can use immediately to manage AI rollout decisions in your organization.</p>

<ul>
<li>Episode: EP001</li>
<li>Title: Welcome to AI Change Desk</li>
<li>Runtime: 6m 25s (launch edition)</li>
<li>Host: Michael Hanna-Butros Meyering</li>
</ul>

<ul>
<li>00:00 Cold open: the 3 questions teams keep asking about AI</li>
<li>00:42 Intro (show ID)</li>
<li>00:57 Show mission: AI as an operating shift, not a tool announcement</li>
<li>01:39 Plain-English definitions: AI, LLM, and change management</li>
<li>02:34 Personal context + why this show exists</li>
<li>03:21 Boundaries + AI-use disclosure</li>
<li>04:06 Show contract: practical, credible, actionable</li>
<li>04:39 4D Desk Memo: Decision, Data, Drift, Deployment</li>
<li>05:30 Inner workflow: how this podcast is produced</li>
<li>06:04 Listener question + outro</li>
<li>06:15 Outro (show close)</li>
</ul>

<ul>
<li>AI rollouts fail more often from adoption and governance gaps than model quality.</li>
<li>Treat AI changes as operational decisions with clear ownership and controls.</li>
<li>Use the 4D Desk Memo to make fast, defensible decisions: Decision, Data, Drift, and Deployment.</li>
</ul>

<p>This episode used AI-assisted production for:</p>
<ul>
<li>Script drafting support</li>
<li>Voice synthesis through an authorized ElevenLabs voice model</li>
<li>Packaging and publishing automation</li>
</ul>
<p>Final editorial decisions, risk posture, and publication approval were made by Michael Hanna-Butros Meyering.</p>

<ul>
<li>Daily research scan</li>
<li>Source verification and editorial filtering</li>
<li>Script lock in Context -&gt; Impact -&gt; Action format</li>
<li>Voice rendering through ElevenLabs API (sketched below)</li>
<li>Audio QA</li>
<li>RSS.com episode publishing</li>
<li>Google Cloud Storage + Google Sites web publishing</li>
</ul>

<ul>
<li>ElevenLabs API quickstart: <a href="https://elevenlabs.io/docs/eleven-api/quickstart">https://elevenlabs.io/docs/eleven-api/quickstart</a></li>
<li>RSS.com Core API docs: <a href="https://api.rss.com/v4/docs">https://api.rss.com/v4/docs</a></li>
<li>Google Cloud Storage static hosting: <a href="https://cloud.google.com/storage/docs/hosting-static-website">https://cloud.google.com/storage/docs/hosting-static-website</a></li>
<li>Episode page: <a href="https://www.michaelhbm.com/AIChangeDesk/episodes/ep001-welcome-to-ai-change-desk.html">https://www.michaelhbm.com/AIChangeDesk/episodes/ep001-welcome-to-ai-change-desk.html</a></li>
<li>Transcript (TXT): <a href="https://storage.googleapis.com/site-app-html/AIChangeDesk/transcripts/ep001-welcome-to-ai-change-desk.txt">https://storage.googleapis.com/site-app-html/AIChangeDesk/transcripts/ep001-welcome-to-ai-change-desk.txt</a></li>
<li>RSS feed: <a href="https://media.rss.com/aichangedesk/feed.xml">https://media.rss.com/aichangedesk/feed.xml</a></li>
</ul>
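
<p>To make the voice-rendering and publishing steps above concrete, here is one minimal sketch of that hand-off in Python, following the ElevenLabs and Google Cloud Storage docs linked above. The voice ID, model ID, bucket name, and file paths are placeholders, and this is an illustration of the pattern, not the show's actual production script.</p>

<pre><code># Minimal sketch: render one episode script to audio with the ElevenLabs text-to-speech API,
# then publish the MP3 to the Google Cloud Storage bucket behind the static site.
# Placeholders: VOICE_ID, MODEL_ID, BUCKET, and both file paths.
import os
import requests
from google.cloud import storage

VOICE_ID = "your-voice-id"           # the authorized ElevenLabs voice
MODEL_ID = "eleven_multilingual_v2"  # assumption: any current ElevenLabs TTS model id works here
BUCKET = "your-bucket-name"          # bucket configured for static hosting (see GCS docs above)

def render_episode(script_text, out_path):
    # Core ElevenLabs text-to-speech endpoint; the response body is MP3 audio.
    resp = requests.post(
        f"https://api.elevenlabs.io/v1/text-to-speech/{VOICE_ID}",
        headers={"xi-api-key": os.environ["ELEVENLABS_API_KEY"]},
        json={"text": script_text, "model_id": MODEL_ID},
        timeout=300,
    )
    resp.raise_for_status()
    with open(out_path, "wb") as f:
        f.write(resp.content)

def publish_to_gcs(local_path, object_name):
    # Upload the rendered audio next to the transcript and episode-page assets.
    bucket = storage.Client().bucket(BUCKET)
    bucket.blob(object_name).upload_from_filename(local_path)

if __name__ == "__main__":
    render_episode("Welcome to AI Change Desk...", "ep001.mp3")
    publish_to_gcs("ep001.mp3", "AIChangeDesk/audio/ep001.mp3")
</code></pre>

<p>The RSS.com publishing step is separate and goes through the Core API linked above; it is not shown in this sketch.</p>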

<p>What is one AI-related decision your organization keeps postponing right now?</p>]]></description>
      <link>https://rss.com/podcasts/aichangedesk/2542954</link>
      <enclosure url="https://content.rss.com/episodes/372404/2542954/aichangedesk/2026_02_13_22_58_13_180f1802-9f60-41e6-95e7-3e22cb33e580.mp3" length="6167158" type="audio/mpeg"/>
      <guid isPermaLink="false">6dba454b-ab5f-40c2-8cd7-09884b4e4030</guid>
      <itunes:duration>385</itunes:duration>
      <itunes:episodeType>full</itunes:episodeType>
      <itunes:season>1</itunes:season>
      <podcast:season>1</podcast:season>
      <itunes:episode>1</itunes:episode>
      <podcast:episode>1</podcast:episode>
      <itunes:explicit>false</itunes:explicit>
      <pubDate>Wed, 11 Feb 2026 21:18:04 GMT</pubDate>
      <itunes:image href="https://media.rss.com/aichangedesk/ep_cover_20260213_050248_35d13a9e1afca000ddcd90e1d55513c6.png"/>
      <podcast:transcript url="https://transcripts.rss.com/372404/2542954/transcript" type="text/plain"/>
      <podcast:chapters url="https://apollo.rss.com/chapters/2542954" type="application/json+chapters"/>
      <psc:chapters>
        <psc:chapter start="0" title="Cold open: the 3 questions teams keep asking about AI"/>
        <psc:chapter start="42" title="Intro (show ID)"/>
        <psc:chapter start="57" title="Show mission: AI as an operating shift"/>
        <psc:chapter start="1:39" title="Plain-English definitions: AI, LLM, and change management"/>
        <psc:chapter start="2:34" title="Personal context and why this show exists"/>
        <psc:chapter start="3:21" title="Boundaries + AI-use disclosure"/>
        <psc:chapter start="4:06" title="Show contract: practical, credible, actionable"/>
        <psc:chapter start="4:39" title="4D Desk Memo: Decision, Data, Drift, Deployment"/>
        <psc:chapter start="5:30" title="Inner workflow: how this podcast is produced"/>
        <psc:chapter start="6:04" title="Listener question + outro"/>
        <psc:chapter start="6:15" title="Outro (show close)"/>
      </psc:chapters>
    </item>
    <item>
      <title><![CDATA[AI Change Desk | EP017: Merchant Control Check]]></title>
      <itunes:title><![CDATA[AI Change Desk | EP017: Merchant Control Check]]></itunes:title>
      <description><![CDATA[
<p>AI shopping is getting more complicated in a way that looks neat in demos and messy in operations.</p>
<p>This episode follows EP014 and asks the tighter version of the same question: once discovery starts in ChatGPT, Google AI Mode, or another AI shopping surface, who actually owns the sale, the attribution, the checkout path, and the support policy that comes after it?</p>

<ul>
<li>Why OpenAI’s shift toward product discovery and merchant-controlled checkout matters</li>
<li>Why Shopify’s agentic storefront tools make AI shopping feel more like channel ops than hype</li>
<li>Why Google’s personalization and protocol work make QA and merchandising harder to reproduce</li>
<li>Why “we showed up in the answer” is still not a sufficient success metric</li>
</ul>

<ul>
<li>OpenAI: <a href="https://openai.com/index/powering-product-discovery-in-chatgpt/">https://openai.com/index/powering-product-discovery-in-chatgpt/</a></li>
<li>OpenAI Help: <a href="https://help.openai.com/en/articles/11128490-shopping-with-chatgpt-search">https://help.openai.com/en/articles/11128490-shopping-with-chatgpt-search</a></li>
<li>Shopify: <a href="https://www.shopify.com/news/agentic-commerce-momentum">https://www.shopify.com/news/agentic-commerce-momentum</a></li>
<li>Shopify Help: <a href="https://help.shopify.com/en/manual/online-sales-channels/agentic-storefronts/chatgpt">https://help.shopify.com/en/manual/online-sales-channels/agentic-storefronts/chatgpt</a></li>
<li>Google: <a href="https://blog.google/products-and-platforms/products/search/personal-intelligence-expansion/">https://blog.google/products-and-platforms/products/search/personal-intelligence-expansion/</a></li>
<li>Google India: <a href="https://blog.google/intl/en-in/products/explore-communicate/new-ways-google-is-using-ai-to-make-shopping-easier/">https://blog.google/intl/en-in/products/explore-communicate/new-ways-google-is-using-ai-to-make-shopping-easier/</a></li>
<li>Google UCP updates: <a href="https://blog.google/products-and-platforms/products/shopping/ucp-updates/">https://blog.google/products-and-platforms/products/shopping/ucp-updates/</a></li>
<li>Search Engine Land: <a href="https://searchengineland.com/google-updates-universal-commerce-protocol-to-help-retailers-sell-on-the-open-agentic-web-456891">https://searchengineland.com/google-updates-universal-commerce-protocol-to-help-retailers-sell-on-the-open-agentic-web-456891</a></li>
</ul>

<ul>
<li>EP017 Practitioner Worksheet — AI Commerce Control Check</li>
</ul>

<ul>
<li>EP014: Commerce Surface Check</li>
<li>EP015: Retained Artifact Check</li>
<li>EP016: National Capacity Check</li>
</ul>]]></description>
      <link>https://rss.com/podcasts/aichangedesk/2731432</link>
      <enclosure url="https://content.rss.com/episodes/372404/2731432/aichangedesk/2026_04_13_18_31_48_c7da4c96-a35f-4914-987d-7a234a718817.mp3" length="24723750" type="audio/mpeg"/>
      <guid isPermaLink="false">f8220026-2417-438a-8e04-4612677cea02</guid>
      <itunes:duration>1545</itunes:duration>
      <itunes:episodeType>full</itunes:episodeType>
      <itunes:episode>17</itunes:episode>
      <podcast:episode>17</podcast:episode>
      <itunes:explicit>false</itunes:explicit>
      <pubDate>Mon, 13 Apr 2026 14:02:56 GMT</pubDate>
      <itunes:image href="https://media.rss.com/aichangedesk/ep_cover_20260413_090407_c3a02f1e598c26ddaabc7b1808b9eb07.png"/>
      <podcast:transcript url="https://transcripts.rss.com/372404/2731432/transcript" type="text/vtt"/>
      <podcast:chapters url="https://apollo.rss.com/chapters/2731432" type="application/json+chapters"/>
      <psc:chapters>
        <psc:chapter start="0" title="Cold open"/>
        <psc:chapter start="2:30" title="What changed since EP014"/>
        <psc:chapter start="7:00" title="OpenAI and merchant-controlled checkout"/>
        <psc:chapter start="13:00" title="Shopify and AI commerce as channel ops"/>
        <psc:chapter start="18:30" title="Google personalization and protocols"/>
        <psc:chapter start="24:00" title="The measurement trap and what to do by Friday"/>
      </psc:chapters>
    </item>
    <item>
      <title><![CDATA[AI Change Desk | EP013: Career Infrastructure Check]]></title>
      <itunes:title><![CDATA[AI Change Desk | EP013: Career Infrastructure Check]]></itunes:title>
      <description><![CDATA[
<p><strong>Summary</strong></p>
<p>AI is becoming career infrastructure before most schools, employers, and training systems know how to teach it, measure it, or distribute its benefits evenly. This episode looks at the education capability gap, worker compensation behavior, and the institutional response now forming around AI-shaped work.</p>
<p><strong>What changed</strong></p>
<ul>
<li>OpenAI argues that education systems need to close an AI capability gap as college-age adults become the biggest adopter cohort and advanced student users still lag well behind power-user behavior.</li>
<li>OpenAI says Americans are sending nearly 3 million messages per day to ChatGPT about wages, compensation, or earnings, making AI a live part of worker pay and career decisions.</li>
<li>Microsoft launched Elevate for Educators and free student career subscriptions with Copilot features, showing a two-track response: train the teacher and equip the student.</li>
<li>Microsoft and Victoria University launched a Datacentre Academy, signaling that AI-driven infrastructure demand is already reshaping workforce pipelines and training priorities.</li>
</ul>
<p><strong>What this means</strong></p>
<ul>
<li>Access is not the same as readiness.</li>
<li>Fluency is not the same as frequent use.</li>
<li>Institutions now have to answer career questions with more specificity, speed, and trust than they did before AI became the default guide in the browser.</li>
</ul>
<p><strong>Action block — Career infrastructure sweep (45 minutes)</strong></p>
<ol>
<li>Pick one career-facing workflow: internship prep, internal mobility, salary benchmarking, or educator training.</li>
<li>Identify where people are already using AI in that workflow.</li>
<li>Find one place where AI is faster than your official guidance.</li>
<li>Add one verification step and one named owner.</li>
<li>Define what “good use” looks like in plain language.</li>
</ol>
<p><strong>Sources</strong></p>
<ul>
<li>OpenAI: Ensuring AI use in education leads to opportunity</li>
</ul>
<p><a href="https://openai.com/index/ai-education-opportunity/">https://openai.com/index/ai-education-opportunity/</a></p>
<ul>
<li>OpenAI: Equipping workers with insights about compensation</li>
</ul>
<p><a href="https://openai.com/index/equipping-workers-with-insights-about-compensation/">https://openai.com/index/equipping-workers-with-insights-about-compensation/</a></p>
<ul>
<li>Microsoft: Elevate for Educators and new AI-powered tools</li>
</ul>
<p><a href="https://news.microsoft.com/source/2026/01/15/microsoft-expands-its-commitment-to-education-with-elevate-for-educators-program-and-new-ai-powered-tools/">https://news.microsoft.com/source/2026/01/15/microsoft-expands-its-commitment-to-education-with-elevate-for-educators-program-and-new-ai-powered-tools/</a></p>
<ul>
<li>Microsoft and Victoria University: Datacentre Academy</li>
</ul>
<p><a href="https://news.microsoft.com/source/asia/2026/03/27/datacentre-academy-vu/">https://news.microsoft.com/source/asia/2026/03/27/datacentre-academy-vu/</a></p>
<p><strong>Disclosure</strong></p>
<p>AI-assisted tools were used in parts of research and production support. Final editorial judgment, risk posture, and release approval stayed human-led. This is operational guidance, not legal advice. These are my opinions and not representative of any organization.</p>]]></description>
      <link>https://rss.com/podcasts/aichangedesk/2677564</link>
      <enclosure url="https://content.rss.com/episodes/372404/2677564/aichangedesk/2026_03_31_13_00_24_a16e6e25-3662-4e13-a89b-9336d83bb523.mp3" length="22103152" type="audio/mpeg"/>
      <guid isPermaLink="false">a7d51352-da69-4095-9d2e-4f783d90f144</guid>
      <itunes:duration>1381</itunes:duration>
      <itunes:episodeType>full</itunes:episodeType>
      <itunes:episode>13</itunes:episode>
      <podcast:episode>13</podcast:episode>
      <itunes:explicit>false</itunes:explicit>
      <pubDate>Tue, 31 Mar 2026 12:00:00 GMT</pubDate>
      <itunes:image href="https://media.rss.com/aichangedesk/ep_cover_20260331_010354_d71301f69e6a93dd2381927de2087b0d.png"/>
      <podcast:transcript url="https://transcripts.rss.com/372404/2677564/transcript" type="text/plain"/>
    </item>
    <item>
      <title><![CDATA[AI Change Desk | EP012: Work Visibility Check]]></title>
      <itunes:title><![CDATA[AI Change Desk | EP012: Work Visibility Check]]></itunes:title>
      <description><![CDATA[
<p>AI is moving from side-chat into the live work surface.</p>
<p>That means the next management problem is not just launch.</p>
<p>It is visibility.</p>
<p>Can you tell where adoption is real, where it is helping, and where the rollout is mostly theater?</p>
<p>This episode covers:</p>
<ul>
<li>write actions moving AI deeper into connected Google and Microsoft apps,</li>
<li>OpenAI's workspace analytics, analytics viewer role, and impact-survey layer,</li>
<li>and one practical Adoption Visibility Sweep you can run before Friday.</li>
</ul>

<ul>
<li>OpenAI's March 13 enterprise release notes show ChatGPT supporting write actions for connected Google Docs, Google Sheets, and calendar apps, plus Microsoft Outlook email and calendar actions.</li>
<li>OpenAI's workspace analytics rollout includes an analytics viewer role, and the March 20 release notes added Admin-created surveys and moved OpenAI-created impact surveys to begin on or after March 31.</li>
<li>OpenAI's March 5 Adoption news channel makes the vendor shift clear: adoption visibility is now part of the product story.</li>
<li>OpenAI's March 11 Wayfair case study gives a concrete example of workflow-level deployment with measurable, vendor-reported results.</li>
</ul>

<p>If AI is now editing the work where the work already lives, leaders need a cleaner way to tell:</p>
<ul>
<li>whether usage is real,</li>
<li>whether outcomes improved,</li>
<li>and where friction is still hiding.</li>
</ul>

<p>Run a 45-minute Adoption Visibility Sweep:</p>
<ol>
<li>Pick one workflow.</li>
<li>Name the artifact that matters.</li>
<li>Track usage, outcome, and friction.</li>
<li>Ask one manager where the change is real and where it is still cosmetic.</li>
<li>Make one Friday decision: train, simplify, standardize, or pause.</li>
</ol>

<ul>
<li>OpenAI Help Center release notes: <a href="https://help.openai.com/en/articles/10128477-chatgpt-enterprise-edu-release-notes">https://help.openai.com/en/articles/10128477-chatgpt-enterprise-edu-release-notes</a></li>
<li>OpenAI workspace analytics: <a href="https://help.openai.com/en/articles/10875114-workspace-analytics-for-chatgpt-enterprise-and-edu">https://help.openai.com/en/articles/10875114-workspace-analytics-for-chatgpt-enterprise-and-edu</a></li>
<li>OpenAI adoption news channel: <a href="https://openai.com/index/introducing-the-adoption-news-channel/">https://openai.com/index/introducing-the-adoption-news-channel/</a></li>
<li>OpenAI x Wayfair case study: <a href="https://openai.com/index/wayfair/">https://openai.com/index/wayfair/</a></li>
<li>Microsoft Wave 3: <a href="https://www.microsoft.com/en-us/microsoft-365/blog/2026/03/09/powering-frontier-transformation-with-copilot-and-agents/">https://www.microsoft.com/en-us/microsoft-365/blog/2026/03/09/powering-frontier-transformation-with-copilot-and-agents/</a></li>
</ul>

<p>AI-assisted tools were used in parts of the research and production workflow.</p>
<p>Final editorial judgment, risk posture, and release approval stayed human-led.</p>
<p>This is operational guidance, not legal advice.</p>
<p>These are my opinions and are not representative of any organization.</p>]]></description>
      <link>https://rss.com/podcasts/aichangedesk/2660667</link>
      <enclosure url="https://content.rss.com/episodes/372404/2660667/aichangedesk/2026_03_25_11_54_34_471b0573-da2d-4425-9eda-9fa857ae6aa3.mp3" length="7386389" type="audio/mpeg"/>
      <guid isPermaLink="false">360b9b44-facb-4dc3-b0df-e5f291bd47d6</guid>
      <itunes:duration>461</itunes:duration>
      <itunes:episodeType>bonus</itunes:episodeType>
      <itunes:episode>12</itunes:episode>
      <podcast:episode>12</podcast:episode>
      <itunes:explicit>false</itunes:explicit>
      <pubDate>Wed, 25 Mar 2026 11:54:29 GMT</pubDate>
      <itunes:image href="https://media.rss.com/aichangedesk/ep_cover_20260325_110327_a7d2c45943433b0c2c4ed3817945ad11.png"/>
    </item>
    <item>
      <title><![CDATA[AI Change Desk | EP011: Control Surface Check]]></title>
      <itunes:title><![CDATA[AI Change Desk | EP011: Control Surface Check]]></itunes:title>
      <description><![CDATA[<p>AI CHANGE DESK | EP011: CONTROL SURFACE CHECK</p>
<p>EPISODE SUMMARY</p>
<p>AI is getting harder to manage for one simple reason: it is disappearing into the normal work surface.</p>
<p>This week’s episode looks at three connected signals:</p>
<p>• Google pushing Gemini deeper into Docs, Sheets, Slides, and Drive</p>
<p>• Anthropic research showing people question polished AI output less once it looks finished</p>
<p>• OpenAI positioning GPT-5.4 for professional work, which turns model choice into a cost, confidence, and review-burden decision</p>
<p>If episode 8 was "validate before you scale," episode 9 was "harden the controls," and episode 10 was "name the owners at the handoff," episode 11 is the next layer:</p>
<p>what happens when AI stops feeling like a separate tool and starts feeling like ordinary work.</p>
<p>WHAT CHANGED</p>
<p>• Google is embedding Gemini more deeply into the files people already live in, making AI feel less like a separate stop and more like part of the default work surface.</p>
<p>• Anthropic’s AI Fluency Index found that users iterate a lot, but they become less critical once Claude produces polished artifacts like code, documents, and interactive outputs.</p>
<p>• OpenAI is positioning GPT-5.4 for professional work and saying it improves factual performance versus GPT-5.2, which makes model choice less about hype and more about acceptable error and review burden.</p>
<p>WHAT THIS MEANS FOR OPERATORS</p>
<p>The management problem is no longer just tool approval.</p>
<p>It is:</p>
<p>• where inside normal work the human still needs to slow down</p>
<p>• how teams keep skepticism alive after output starts looking finished</p>
<p>• which workflows deserve the fastest model versus the most trusted model</p>
<p>WHAT I’D DECIDE BY FRIDAY</p>
<p>1. Pick one default work surface and mark three friction points where a human has to slow down.</p>
<p>2. Teach one collaboration habit people will actually use: "what is missing," "what should I verify," or "where is confidence weak."</p>
<p>3. Separate the fast model from the trusted model instead of pretending one default fits every workflow.</p>
<p>LISTENER QUESTION</p>
<p>Where is your bigger gap right now: noticing AI inside normal work, challenging polished output, or choosing the right model for the job?</p>
<p>LISTEN AND WATCH</p>
<p>• Episode page: https://www.michaelhbm.com/AIChangeDesk/episodes/ep011-control-surface-check.html</p>
<p>• Archive: https://www.michaelhbm.com/AIChangeDesk</p>
<p>• Apple Podcasts: https://podcasts.apple.com/us/podcast/ai-change-desk/id1876677295</p>
<p>• Spotify: https://open.spotify.com/show/5X1sLLTeULqFCdt7aaisGD</p>
<p>SOURCES</p>
<p>• https://openai.com/index/introducing-gpt-5-4/</p>
<p>• https://openai.com/index/introducing-gpt-5-2/</p>
<p>• https://techcrunch.com/2026/03/05/openai-launches-gpt-5-4-with-pro-and-thinking-versions/</p>
<p>• https://blog.google/products-and-platforms/products/workspace/gemini-workspace-updates-march-2026/</p>
<p>• https://techcrunch.com/2026/03/10/google-rolls-out-new-gemini-capabilities-to-docs-sheets-slides-and-drive/</p>
<p>• https://www.anthropic.com/research/AI-fluency-index</p>
<p>• https://www.forbes.com/sites/danfitzpatrick/2026/02/23/anthropics-new-ai-index-shows-what-sets-top-ai-users-apart/</p>]]></description>
      <link>https://rss.com/podcasts/aichangedesk/2654777</link>
      <enclosure url="https://content.rss.com/episodes/372404/2654777/aichangedesk/2026_03_23_21_53_20_36f9f644-c2b9-438c-b870-c179db646263.mp3" length="17843556" type="audio/mpeg"/>
      <guid isPermaLink="false">0f4e1c18-5ec6-490e-b52e-d4e1b9e9f179</guid>
      <itunes:duration>1115</itunes:duration>
      <itunes:episodeType>full</itunes:episodeType>
      <itunes:episode>11</itunes:episode>
      <podcast:episode>11</podcast:episode>
      <itunes:explicit>false</itunes:explicit>
      <pubDate>Mon, 23 Mar 2026 12:00:00 GMT</pubDate>
      <itunes:image href="https://media.rss.com/aichangedesk/ep_cover_20260323_100342_bb98f14b127692a21d1986d70cf93654.png"/>
      <podcast:transcript url="https://transcripts.rss.com/372404/2654777/transcript" type="text/plain"/>
      <podcast:chapters url="https://apollo.rss.com/chapters/2654777" type="application/json+chapters"/>
      <psc:chapters>
        <psc:chapter start="0" title="Cold open: AI disappears into normal work"/>
        <psc:chapter start="1:55" title="Why these three signals belong together"/>
        <psc:chapter start="5:04" title="Story 1: Gemini inside the default work surface"/>
        <psc:chapter start="8:41" title="Story 2: why polished output gets trusted too fast"/>
        <psc:chapter start="12:12" title="Story 3: GPT-5.4 and the new error budget"/>
        <psc:chapter start="15:46" title="Where teams get this wrong"/>
        <psc:chapter start="17:11" title="What I'd decide by Friday"/>
        <psc:chapter start="18:12" title="Close"/>
      </psc:chapters>
    </item>
    <item>
      <title><![CDATA[AI Change Desk | EP016: National Capacity Check]]></title>
      <itunes:title><![CDATA[AI Change Desk | EP016: National Capacity Check]]></itunes:title>
      <description><![CDATA[
<p>This week is not really a feature week.</p>
<p>It is a capacity week.</p>
<p>Anthropic's compute expansion, Project Glasswing, and Microsoft's nation-scale AI commitments in Japan and Singapore all point to the same shift: compute, defense, and skills are starting to move together.</p>

<ul>
<li>Anthropic expanded its Google Cloud and Broadcom partnership to secure multiple gigawatts of additional TPU capacity starting in 2027.</li>
<li>Anthropic launched Project Glasswing, a defensive cybersecurity initiative with more than 40 organizations, up to $100 million in credits over five years, and access to non-public defensive model support.</li>
<li>Microsoft paired nation-scale AI infrastructure and workforce commitments in Japan and Singapore, including infrastructure investment, cybersecurity collaboration, and large-scale skills programs.</li>
</ul>

<p>If the serious players are organizing compute, security, and skills together, AI is no longer just another software category.</p>
<p>That means leaders need to think about provider concentration, defensive coordination, and workforce readiness as one operating board instead of three separate conversations.</p>

<ul>
<li>Name your top two AI-provider dependencies.</li>
<li>Identify one workflow where provider concentration is now a real operating risk.</li>
<li>Confirm who owns external AI-safety or cyber-intake if a report lands.</li>
<li>Check where AI training is actually happening, not just where it is theoretically available.</li>
<li>Write down one future-access assumption your team is making without evidence.</li>
</ul>

<ul>
<li>Anthropic: Google/Broadcom partnership on compute (<a href="https://www.anthropic.com/news/google-broadcom-partnership-compute">link</a>)</li>
<li>Reuters: Anthropic expands Google and Broadcom partnership on AI compute (<a href="https://www.reuters.com/world/us/anthropic-expand-google-cloud-partnership-broadcom-help-power-ai-computing-2026-04-06/">link</a>)</li>
<li>Anthropic: Project Glasswing (<a href="https://www.anthropic.com/glasswing">link</a>)</li>
<li>The Verge: Anthropic launches Project Glasswing (<a href="https://www.theverge.com/news/649536/anthropic-launches-project-glasswing-cybersecurity-initiative">link</a>)</li>
<li>Microsoft: Japan AI infrastructure, cybersecurity, and workforce commitment (<a href="https://news.microsoft.com/source/asia/2026/04/03/microsoft-deepens-its-commitment-to-japan-with-10-billion-investment-in-ai-infrastructure-cybersecurity-workforce/">link</a>)</li>
<li>Microsoft: Singapore AI access and skills commitment (<a href="https://news.microsoft.com/source/asia/2026/04/01/microsoft-will-make-ai-tools-and-skills-accessible-to-all-singapore-residents/">link</a>)</li>
<li>Reuters: Microsoft to invest $10 billion in Japan for AI infrastructure and skills (<a href="https://www.reuters.com/world/asia-pacific/microsoft-invest-10-billion-japan-ai-cloud-infrastructure-nikkei-reports-2026-04-02/">link</a>)</li>
<li>The Straits Times: Microsoft to invest more in Singapore and expand AI skilling (<a href="https://www.straitstimes.com/tech/microsoft-to-invest-more-in-singapore-expand-ai-skilling-efforts">link</a>)</li>
</ul>

<p>This is operational analysis, not legal advice.</p>
<p>AI-assisted tools may be used in parts of the research and production workflow. Final editorial judgment, risk posture, and release approval stay human-led.</p>]]></description>
      <link>https://rss.com/podcasts/aichangedesk/2711968</link>
      <enclosure url="https://content.rss.com/episodes/372404/2711968/aichangedesk/2026_04_09_04_10_09_383601c7-b6bf-475c-8dbe-4d2d299e9bbb.mp3" length="8490047" type="audio/mpeg"/>
      <guid isPermaLink="false">18b18eea-32b1-4ebf-88ee-ecb2c40afdf2</guid>
      <itunes:duration>530</itunes:duration>
      <itunes:episodeType>bonus</itunes:episodeType>
      <itunes:explicit>false</itunes:explicit>
      <pubDate>Thu, 09 Apr 2026 12:00:00 GMT</pubDate>
      <itunes:image href="https://media.rss.com/aichangedesk/ep_cover_20260409_040403_881aee753d3eda1910e6e4548f731f3c.png"/>
      <podcast:transcript url="https://transcripts.rss.com/372404/2711968/transcript" type="text/vtt"/>
    </item>
    <item>
      <title><![CDATA[AI Change Desk | EP015: Retained Artifact Check]]></title>
      <itunes:title><![CDATA[AI Change Desk | EP015: Retained Artifact Check]]></itunes:title>
      <description><![CDATA[<p>AI CHANGE DESK | EP015: RETAINED ARTIFACT CHECK</p>
<p>AI work no longer disappears when the chat ends.</p>
<p>This week's main episode looks at what changes when AI artifacts stick around: files stay in Library until deletion, model cutovers need verification after the deadline passes, open-model options are getting more credible, pricing shifts can show up as same-week variance, and outside-in safety reporting only helps if someone owns intake.</p>
<p>EPISODE SUMMARY</p>
<p>The point for EP015 is simple: once AI work survives the session that created it, convenience has already turned into ownership. That means retention, reuse, verification, deletion, cost review, and safety intake all need named owners instead of vague good intentions.</p>
<p>WHAT CHANGED THIS WEEK</p>
<p>• OpenAI's File Library guidance makes it clear that uploaded and created files can persist in ChatGPT until users delete them manually.</p>
<p>• The GPT-4o Custom GPT cutoff is now past the April 3 line, which means teams should be doing post-cutoff verification instead of waiting for a migration window.</p>
<p>• Google's Gemma 4 launch makes hosted-versus-open optionality more practical, not just theoretical.</p>
<p>• OpenAI's built-in tool session pricing is now live, while the Safety Bug Bounty keeps outside-in reporting active.</p>
<p>WHAT THIS MEANS FOR OPERATORS</p>
<p>Treat retained AI artifacts like reusable work objects, not disposable prompt residue. If a file, prompt pattern, or Custom GPT behavior can carry forward into next week, it needs a retention tier, a deletion expectation, and a named owner who can answer for reuse.</p>
<p>WHAT I'D DECIDE BY FRIDAY</p>
<p>• Name one owner for retained-file policy in the workflows that matter most.</p>
<p>• Run a post-cutoff verification pass on the Custom GPTs people actually depend on.</p>
<p>• Check same-week cost variance on any workflow touching built-in tool sessions.</p>
<p>• Name a real intake owner and SLA for outside-in safety or abuse reports.</p>
<p>• Decide whether one open-model evaluation lane is worth running this quarter.</p>
<p>LISTENER QUESTION</p>
<p>What is your team currently keeping, reusing, or routing forward through AI systems without a named owner?</p>
<p>SOURCES</p>
<p>• OpenAI release notes: https://help.openai.com/en/articles/6825453-chatgpt-release-notes</p>
<p>• OpenAI File Storage and Library: https://help.openai.com/en/articles/20001052-file-storage-and-library-in-chatgpt</p>
<p>• OpenAI Business / Enterprise model limits: https://help.openai.com/en/articles/12003714-chatgpt-business-models-limits</p>
<p>• Google Gemma 4 announcement: https://blog.google/innovation-and-ai/technology/developers-tools/gemma-4/</p>
<p>• Google for Developers coverage: https://developers.googleblog.com/en/gemma-4/</p>
<p>• OpenAI API pricing: https://openai.com/api/pricing/</p>
<p>• OpenAI developer pricing docs: https://developers.openai.com/api/docs/pricing</p>
<p>• OpenAI Safety Bug Bounty: https://openai.com/index/safety-bug-bounty/</p>
<p>• OpenAI company announcements: https://openai.com/news/company-announcements/</p>
<p>DISCLOSURE</p>
<p>AI-assisted tools were used in research, packaging, and production support. Final editorial judgment, release approval, and risk decisions remain human-led.</p>]]></description>
      <link>https://rss.com/podcasts/aichangedesk/2692842</link>
      <enclosure url="https://content.rss.com/episodes/372404/2692842/aichangedesk/2026_04_07_22_14_36_9db6a3d4-77fc-4d6d-a110-3f276ec948a8.mp3" length="26719002" type="audio/mpeg"/>
      <guid isPermaLink="false">7f23467e-018c-431a-b2ee-54cb96a9992a</guid>
      <itunes:duration>1669</itunes:duration>
      <itunes:episodeType>full</itunes:episodeType>
      <itunes:explicit>false</itunes:explicit>
      <pubDate>Mon, 06 Apr 2026 12:00:00 GMT</pubDate>
      <itunes:image href="https://media.rss.com/aichangedesk/ep_cover_20260406_050458_7cb6fc7ceebb26388843e40d1964e8c5.png"/>
      <podcast:transcript url="https://transcripts.rss.com/372404/2692842/transcript" type="text/vtt"/>
      <podcast:chapters url="https://apollo.rss.com/chapters/2692842" type="application/json+chapters"/>
      <psc:chapters>
        <psc:chapter start="0" title="Cold open — the artifact outlives the chat"/>
        <psc:chapter start="1:56.220" title="What changed this week — persistence, cutoffs, and live clocks"/>
        <psc:chapter start="6:25.510" title="Retained artifacts are a control surface now"/>
        <psc:chapter start="10:54.800" title="Where teams get this wrong"/>
        <psc:chapter start="14:30.240" title="The hidden tradeoff — continuity creates residue"/>
        <psc:chapter start="18:5.670" title="Model cutoffs, open-model pilots, and cost drift"/>
        <psc:chapter start="23:28.830" title="What I would decide by Friday"/>
      </psc:chapters>
    </item>
    <item>
      <title><![CDATA[AI Change Desk | EP014: Commerce Surface Check]]></title>
      <itunes:title><![CDATA[AI Change Desk | EP014: Commerce Surface Check]]></itunes:title>
      <description><![CDATA[
<p>ChatGPT is becoming a product-discovery surface, not just a side assistant.</p>
<p>This episode looks at what changes when shoppers start comparing products inside AI before they ever land on a retailer site, and what operators should measure before they confuse visibility with conversion.</p>

<ul>
<li>OpenAI upgraded shopping in ChatGPT on March 24, 2026, with richer product comparisons, image-based product finding, and better data freshness and coverage.</li>
<li>Shopify said millions of merchants can now sell in AI chats, with ChatGPT referral attribution and merchant-of-record control.</li>
<li>Sephora launched an app in ChatGPT tied to beauty guidance, loyalty benefits, and future in-app checkout.</li>
</ul>

<p>A new surface matters when it changes where decisions begin.</p>
<p>If discovery starts in AI before it starts on your site, your old reporting stack only sees the second half of the journey.</p>
<p>That means product data quality, merchant attribution, and category readiness matter more than another generic “we show up in AI” status update.</p>

<ul>
<li>Pick one category where comparison behavior is already common.</li>
<li>Audit titles, attributes, images, price accuracy, reviews, and availability.</li>
<li>Define an AI-originated attribution view before the internal story turns into vibes.</li>
<li>Decide whether you are optimizing only for discovery or for deeper merchant-side experiences too.</li>
<li>Put one owner on the workflow.</li>
</ul>

<ul>
<li>OpenAI: Powering Product Discovery in ChatGPT (<a href="https://openai.com/index/powering-product-discovery-in-chatgpt/">link</a>)</li>
<li>OpenAI Help Center: ChatGPT Release Notes (<a href="https://help.openai.com/en/articles/6825453-chatgpt-release-notes">link</a>)</li>
<li>Shopify: Millions of merchants can sell in AI chats (<a href="https://www.shopify.com/news/agentic-commerce-momentum">link</a>)</li>
<li>Sephora Newsroom: Sephora App in ChatGPT Brings a New Personalized Beauty Experience (<a href="https://newsroom.sephora.com/sephora-app-in-chatgpt-brings-a-new-personalized-beauty-experience/">link</a>)</li>
<li>OpenAI Help Center: Shopping with ChatGPT Search (<a href="https://help.openai.com/en/articles/11128490-improved-shopping-results-from-chatgpt-search">link</a>)</li>
</ul>

<p>This is operational analysis, not legal advice.</p>
<p>AI-assisted tools may be used in parts of the research and production workflow. Final editorial judgment, risk posture, and release approval stay human-led.</p>]]></description>
      <link>https://rss.com/podcasts/aichangedesk/2679628</link>
      <enclosure url="https://content.rss.com/episodes/372404/2679628/aichangedesk/2026_04_01_05_34_40_71fc0a46-c782-4825-a033-23a714c80138.mp3" length="11244159" type="audio/mpeg"/>
      <guid isPermaLink="false">33a3d1db-b149-40f6-a112-654e21e4f48b</guid>
      <itunes:duration>702</itunes:duration>
      <itunes:episodeType>bonus</itunes:episodeType>
      <itunes:explicit>false</itunes:explicit>
      <pubDate>Wed, 01 Apr 2026 12:00:00 GMT</pubDate>
      <itunes:image href="https://media.rss.com/aichangedesk/ep_cover_20260401_070447_54e18a9c93f824be4a2c279769e829c9.png"/>
      <podcast:transcript url="https://transcripts.rss.com/372404/2679628/transcript" type="text/plain"/>
      <podcast:chapters url="https://apollo.rss.com/chapters/2679628" type="application/json+chapters"/>
      <psc:chapters>
        <psc:chapter start="0" title="Cold open — the shopper starts in ChatGPT"/>
        <psc:chapter start="1:05" title="What changed — OpenAI, Shopify, and Sephora move discovery upstream"/>
        <psc:chapter start="3:20" title="Why this matters — product discovery becomes an operating surface"/>
        <psc:chapter start="5:40" title="Where teams get this wrong — presence is not performance"/>
        <psc:chapter start="7:40" title="The hidden tradeoff — more intent, less control"/>
        <psc:chapter start="9:35" title="What I would decide by Friday — the commerce surface sweep"/>
        <psc:chapter start="11:05" title="Close — what should you measure first?"/>
      </psc:chapters>
    </item>
    <item>
      <title><![CDATA[AI Brief | EP010: Evaluation and Ownership Check]]></title>
      <itunes:title><![CDATA[AI Brief | EP010: Evaluation and Ownership Check]]></itunes:title>
      <description><![CDATA[
<p>If Episode 9 was about hardening controls, Episode 10 is about making those controls survive real handoffs.</p>

<ul>
<li>AWS published a stakeholder-focused guide for operationalizing agentic AI, reinforcing that ownership must be explicit before scale.</li>
<li>YouTube expanded likeness-detection protections for civic leaders and journalists, signaling that identity-integrity response is now an operating requirement.</li>
<li>Anthropic announced a Sydney APAC office expansion, reinforcing that control design must travel across regions without losing clarity.</li>
</ul>

<ul>
<li>"Approved app" is no longer enough; action-level workflow gates are the unit of control.</li>
<li>Detection tools are useful, but they fail without named routing, pause authority, and response ownership.</li>
<li>Handoffs are the drift zone: reviewer ambiguity, escalation lag, and rollback uncertainty.</li>
</ul>

<ol>
<li>Pick your top two high-impact AI workflows.</li>
<li>Assign four named owners per workflow: approver, pause owner, rollback owner, public-response owner.</li>
<li>Add one hard gate: no owner, no launch.</li>
<li>Run one impersonation tabletop and log decisions.</li>
<li>Ship a one-page operator memo (changes, approvals, restrictions, exceptions, next review date).</li>
</ol>
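<p>To make the "no owner, no launch" gate concrete, here is a small illustrative sketch in Python; the role names mirror the list above, and the workflow and people shown are hypothetical.</p>
<pre><code># Hypothetical "no owner, no launch" gate for one AI workflow change.
# The four roles mirror the action list above; the data shape is illustrative only.
REQUIRED_ROLES = ("approver", "pause_owner", "rollback_owner", "public_response_owner")

def missing_owners(owners):
    """Return the roles that have no named person; an empty list means the gate passes."""
    return [role for role in REQUIRED_ROLES if not owners.get(role)]

# Example: a workflow missing a rollback owner gets blocked instead of quietly launched.
workflow = "Support reply drafting"
owners = {
    "approver": "A. Team Lead",
    "pause_owner": "B. Ops",
    "rollback_owner": "",                 # unnamed: exactly what the gate exists to catch
    "public_response_owner": "C. Comms",
}
gaps = missing_owners(owners)
if gaps:
    print(f"{workflow}: blocked, missing " + ", ".join(gaps))
else:
    print(f"{workflow}: launch allowed")
</code></pre>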

<p>AI-assisted tools were used in parts of research and production support. Final editorial judgment and release approval remained human-led. This is operational guidance, not legal advice.</p>

<ul>
<li><a href="https://aws.amazon.com/blogs/machine-learning/operationalizing-agentic-ai-part-1-a-stakeholders-guide/">https://aws.amazon.com/blogs/machine-learning/operationalizing-agentic-ai-part-1-a-stakeholders-guide/</a></li>
<li><a href="https://blog.youtube/news-and-events/expanding-our-likeness-detection-tools-to-protect-civic-leaders-journalists-and-more/">https://blog.youtube/news-and-events/expanding-our-likeness-detection-tools-to-protect-civic-leaders-journalists-and-more/</a></li>
<li><a href="https://techcrunch.com/2026/03/10/youtube-ai-deepfake-detection-politicians-government-officials-journalists/">https://techcrunch.com/2026/03/10/youtube-ai-deepfake-detection-politicians-government-officials-journalists/</a></li>
<li><a href="https://www.anthropic.com/news/sydney-fourth-office-asia-pacific">https://www.anthropic.com/news/sydney-fourth-office-asia-pacific</a></li>
</ul>]]></description>
      <link>https://rss.com/podcasts/aichangedesk/2638882</link>
      <enclosure url="https://content.rss.com/episodes/372404/2638882/aichangedesk/2026_03_18_15_34_37_20eda106-16d0-4145-9c59-8eb4121596ea.mp3" length="8119377" type="audio/mpeg"/>
      <guid isPermaLink="false">5c6a67e9-13a9-4f1e-bd9f-a08f2688b878</guid>
      <itunes:duration>507</itunes:duration>
      <itunes:episodeType>bonus</itunes:episodeType>
      <itunes:explicit>false</itunes:explicit>
      <pubDate>Wed, 18 Mar 2026 12:00:00 GMT</pubDate>
      <itunes:image href="https://media.rss.com/aichangedesk/ep_cover_20260318_120328_3db782540b593edddf152b723537cc11.png"/>
      <podcast:chapters url="https://apollo.rss.com/chapters/2638882" type="application/json+chapters"/>
      <psc:chapters>
        <psc:chapter start="0" title="Cold open + disclosure + boundary"/>
        <psc:chapter start="1:23" title="Story 1: Ownership before autonomy"/>
        <psc:chapter start="4:25" title="Story 2: Identity-integrity response"/>
        <psc:chapter start="6:40" title="45-minute Handoff and Trust Sweep"/>
        <psc:chapter start="7:45" title="Close + listener question"/>
      </psc:chapters>
    </item>
    <item>
      <title><![CDATA[AI Change Desk | EP009: Control Hardening Week]]></title>
      <itunes:title><![CDATA[AI Change Desk | EP009: Control Hardening Week]]></itunes:title>
      <description><![CDATA[<p>AI CHANGE DESK | EP009: CONTROL HARDENING WEEK</p>
<p>EPISODE SUMMARY</p>
<p>This episode translates this week’s control signals into one operator contract:</p>
<p>• legal claim confidence,</p>
<p>• monitoring evidence,</p>
<p>• suite-level governance assumptions,</p>
<p>• and fallback ownership.</p>
<p>This is the bridge from:</p>
<p>• Episode 5 (access control),</p>
<p>• Episode 6 (continuity),</p>
<p>• Episode 7 (security workflow contract),</p>
<p>• Episode 8 (release validation).</p>
<p>WHAT CHANGED THIS WEEK</p>
<p>1. OpenAI published a legal notice on unauthorized equity transactions (Mar 12, 2026).</p>
<p>2. Anthropic launched The Anthropic Institute and expanded policy posture signals (Mar 11, 2026).</p>
<p>3. NIST advanced practical guidance for monitoring deployed AI systems (Mar 6 + Mar 9, 2026).</p>
<p>4. Microsoft framed Agent 365 + E7 Frontier as an integrated intelligence/trust operating model (Mar 9, 2026).</p>
<p>5. Podbean switched off dynamic ad insertion in EEA/EU/UK, highlighting continuity dependency risk (Mar 10, 2026).</p>
<p>OPERATIONAL TRANSLATION</p>
<p>• If a vendor claim cannot be evidenced, treat it as risk until validated.</p>
<p>• Monitoring must be release-adjacent and reconstructable, not dashboard-only.</p>
<p>• Suite selection is an operating-model decision, not only a capability decision.</p>
<p>• Continuity planning must cover policy/platform dependency changes, not just system outages.</p>
<p>MONDAY ACTION BLOCK (45 MINUTES)</p>
<p>1. Triage this week’s legal/monitoring/suite/continuity signals.</p>
<p>2. Exposure-map your top five workflows.</p>
<p>3. Lock owners for thresholds, exceptions, rollback, and communications.</p>
<p>4. Send one operator memo: changed / approved / restricted / exception path.</p>
<p>5. Set due dates and next review checkpoint.</p>
<p>LISTENER QUESTION</p>
<p>Where is your highest exposure this week:</p>
<p>• claim confidence,</p>
<p>• monitoring evidence,</p>
<p>• or fallback ownership?</p>
<p>WATCH + LISTEN</p>
<p>• Episode hub: https://www.michaelhbm.com/AIChangeDesk</p>
<p>• YouTube channel: https://www.youtube.com/@AIChangeDesk</p>
<p>• RSS feed: https://media.rss.com/aichangedesk/feed.xml</p>
<p>• Apple Podcasts: https://podcasts.apple.com/us/podcast/ai-change-desk/id1876677295</p>
<p>• Spotify: https://open.spotify.com/show/5X1sLLTeULqFCdt7aaisGD</p>
<p>SOURCES</p>
<p>• https://openai.com/policies/unauthorized-openai-equity-transactions/</p>
<p>• https://www.anthropic.com/news/the-anthropic-institute</p>
<p>• https://www.axios.com/2026/03/11/anthropic-dc-presence</p>
<p>• https://www.nist.gov/news-events/news/2026/03/new-report-challenges-monitoring-deployed-ai-systems</p>
<p>• https://www.nist.gov/publications/challenges-monitoring-deployed-ai-systems-center-ai-standards-and-innovation</p>
<p>• https://blogs.microsoft.com/blog/2026/03/09/introducing-the-first-frontier-suite-built-on-intelligence-trust/</p>
<p>• https://www.microsoft.com/en-us/security/blog/2026/03/09/secure-agentic-ai-for-your-frontier-transformation/</p>
<p>• https://podnews.net/update/podbean-switches-off-dai</p>
<p>• https://www.reddit.com/r/podcasting/comments/1rozrcy/dynamic_ads_policy_change/</p>
<p>DISCLOSURE</p>
<p>AI-assisted tools were used in parts of research and production support. Final editorial judgment, risk posture, and release approval remained human-led. This is operational guidance, not legal advice.</p>]]></description>
      <link>https://rss.com/podcasts/aichangedesk/2632316</link>
      <enclosure url="https://content.rss.com/episodes/372404/2632316/aichangedesk/2026_03_16_14_26_33_cf0a2c4e-1161-49b4-9eb8-70ddf281a5f4.mp3" length="21308856" type="audio/mpeg"/>
      <guid isPermaLink="false">44260a35-69e3-40f0-b487-508e09825fa4</guid>
      <itunes:duration>1331</itunes:duration>
      <itunes:episodeType>full</itunes:episodeType>
      <itunes:explicit>false</itunes:explicit>
      <pubDate>Mon, 16 Mar 2026 12:00:00 GMT</pubDate>
      <itunes:image href="https://media.rss.com/aichangedesk/ep_cover_20260316_020329_e079f9edd12e9bb5fe7bdc66d9e0df86.png"/>
      <podcast:transcript url="https://transcripts.rss.com/372404/2632316/transcript" type="text/plain"/>
      <podcast:chapters url="https://apollo.rss.com/chapters/2632316" type="application/json+chapters"/>
      <psc:chapters>
        <psc:chapter start="0" title="Cold open + disclosure"/>
        <psc:chapter start="1:50" title="Continuity bridge from episodes five to eight"/>
        <psc:chapter start="4:10" title="Story 1 — legal signal hardening and claim confidence"/>
        <psc:chapter start="8:30" title="Story 2 — monitoring as release control"/>
        <psc:chapter start="12:55" title="Story 3 — suite operating model and portability risk"/>
        <psc:chapter start="16:45" title="Story 4 — continuity and dependency fallback ownership"/>
        <psc:chapter start="19:20" title="45-minute control block and scorecard"/>
        <psc:chapter start="21:05" title="Close + listener question"/>
      </psc:chapters>
    </item>
  </channel>
</rss>