<?xml version="1.0" encoding="utf-8" standalone="yes"?>
<rss version="2.0" xmlns:atom="http://www.w3.org/2005/Atom">
  <channel>
    <title>Reinforcement-Learning on emsenn.net</title>
    <link>https://emsenn.net/tags/reinforcement-learning/</link>
    <description>Recent content in Reinforcement-Learning on emsenn.net</description>
    <generator>Hugo</generator>
    <language>en-us</language>
    <lastBuildDate>Sat, 11 Oct 2025 00:00:00 +0000</lastBuildDate>
    <atom:link href="https://emsenn.net/tags/reinforcement-learning/index.xml" rel="self" type="application/rss+xml" />
    <item>
      <title>Stability Optimization in Artificial Agents</title>
      <link>https://emsenn.net/library/domains/engineering/domains/tech/domains/computing/domains/artificial-intelligence/texts/describing-stability-optimization-in-artificial-agents/</link>
      <pubDate>Sat, 11 Oct 2025 00:00:00 +0000</pubDate>
      <guid>https://emsenn.net/library/domains/engineering/domains/tech/domains/computing/domains/artificial-intelligence/texts/describing-stability-optimization-in-artificial-agents/</guid>
      <description>&lt;h2 id=&#34;abstract&#34;&gt;Abstract&lt;/h2&gt;&#xA;&lt;p&gt;In RLHF, a model adapts to a fixed feedback distribution. This is the asymmetric stability setting of &lt;em&gt;Information-Theoretic Stability as Reward Function&lt;/em&gt;: the model&amp;rsquo;s stability reward is a function of its coupling to the feedback source, not an intrinsic property. We show that RLHF training dynamics instantiate the two-channel decomposition (alignment progress equals behavioral convergence plus responsiveness increase), and that the MI-dominates-marginal inequality imposes a structural constraint on training: the model&amp;rsquo;s responsiveness to feedback can never fall below its behavioral convergence. These results reframe alignment as an informational accounting problem rather than a moral one.&lt;/p&gt;</description>
    </item>
  </channel>
</rss>
