<?xml version="1.0" encoding="utf-8"?>
<rss xmlns:atom="http://www.w3.org/2005/Atom" version="2.0">
    <channel>
        <title>NVIDIA - Tag - Simi Studio</title>
        <link>https://simi.studio/en/tags/nvidia/</link>
        <description>NVIDIA - Tag - Simi Studio</description>
        <generator>Hugo -- gohugo.io</generator><language>en</language><managingEditor>simi@simi.studio (Simi)</managingEditor>
            <webMaster>simi@simi.studio (Simi)</webMaster><lastBuildDate>Sat, 15 Jul 2023 10:00:00 &#43;0800</lastBuildDate><atom:link href="https://simi.studio/en/tags/nvidia/" rel="self" type="application/rss+xml" /><item>
    <title>Running LLMs Locally in 2023: Hardware Configs for Every Budget</title>
    <link>https://simi.studio/en/posts/hardware-for-local-ai-2023/</link>
    <pubDate>Sat, 15 Jul 2023 10:00:00 &#43;0800</pubDate>
    <author>simi@simi.studio (Simi)</author>
    <guid>https://simi.studio/en/posts/hardware-for-local-ai-2023/</guid>
    <description><![CDATA[Running LLMs locally is getting popular, but what hardware should you buy at different budgets? This article provides real benchmark data to help you choose the right configuration. No product promotion, just objective numbers.]]></description>
</item>
</channel>
</rss>
