<?xml version="1.0" encoding="UTF-8"?>
<rss xmlns:dc="http://purl.org/dc/elements/1.1/" version="2.0">
  <channel>
    <title>topic Re: Aggregate size for small scale implementations in Network and Storage Protocols</title>
    <link>https://community.netapp.com/t5/Network-and-Storage-Protocols/Aggregate-size-for-small-scale-implementations/m-p/28125#M2521</link>
    <description>&lt;HTML&gt;&lt;HEAD&gt;&lt;/HEAD&gt;&lt;BODY&gt;&lt;P&gt;Sounds like a very reasonable plan to me...&lt;/P&gt;&lt;P&gt;&lt;/P&gt;&lt;P&gt;&lt;BR /&gt;I hope this response has been helpful to you.&lt;/P&gt;&lt;P&gt;&lt;/P&gt;&lt;P&gt;At your service,&lt;/P&gt;&lt;P&gt;&lt;/P&gt;&lt;P&gt;&lt;BR /&gt;Eugene E. Kashpureff&lt;BR /&gt;&lt;A class="jive-link-email-small" href="mailto:ekashp@kashpureff.org" target="_blank"&gt;ekashp@kashpureff.org&lt;/A&gt;&lt;BR /&gt;NetApp Instructor and Independent Consultant&lt;BR /&gt;&lt;A class="jive-link-external-small" href="http://www.linkedin.com/in/eugenekashpureff" target="_blank"&gt;http://www.linkedin.com/in/eugenekashpureff&lt;/A&gt;&lt;/P&gt;&lt;P&gt;&lt;/P&gt;&lt;P&gt;(P.S. I appreciate points for helpful or correct answers.)&lt;/P&gt;&lt;/BODY&gt;&lt;/HTML&gt;</description>
    <pubDate>Wed, 29 Dec 2010 02:47:37 GMT</pubDate>
    <dc:creator>ekashpureff</dc:creator>
    <dc:date>2010-12-29T02:47:37Z</dc:date>
    <item>
      <title>Aggregate size for small scale implementations</title>
      <link>https://community.netapp.com/t5/Network-and-Storage-Protocols/Aggregate-size-for-small-scale-implementations/m-p/28102#M2514</link>
      <description>&lt;HTML&gt;&lt;HEAD&gt;&lt;/HEAD&gt;&lt;BODY&gt;&lt;P&gt;I currently have a FAS2040 + DS14 that I will be using for NFS/VMware&amp;nbsp; and would like to get some recommendations on aggregate size and&amp;nbsp; planning for a future purchase of an extra controller + shelf.&lt;/P&gt;&lt;P&gt;&lt;BR /&gt;Below is the default configuration the Netapp tech setup a few months ago.&lt;/P&gt;&lt;P&gt;&lt;/P&gt;&lt;P&gt;12 x 1TB 7200 disk in unit (SATA)&lt;/P&gt;&lt;P&gt;14 x 1TB 7200 disk on shelf (FC-ATA)&lt;/P&gt;&lt;P&gt;1 raid group&lt;/P&gt;&lt;P&gt;&lt;/P&gt;&lt;P&gt;aggr0 with 3 disk in unit for system root&lt;/P&gt;&lt;P&gt;aggr1 with 7 disk in unit (2 spare)&lt;/P&gt;&lt;P&gt;aggr2 with 11 disk on shelf (3 spare)&lt;/P&gt;&lt;P&gt;&lt;/P&gt;&lt;P&gt;Since these are only 7200 SATA disks, I would like to configure for the best possible performance.&lt;/P&gt;&lt;P&gt;&lt;/P&gt;&lt;P&gt;Should I change the configuration and combine aggr1 + aggr2 into one aggregate for better performance?&lt;/P&gt;&lt;P&gt;Is it recommended to mix the disks on the unit and the external shelf in an aggregate even though they are both SATA?&lt;/P&gt;&lt;P&gt;&lt;/P&gt;&lt;P&gt;Keep in mind that I would like to expand this setup without&amp;nbsp; having to move data around when I purchase another controller + shelf in&amp;nbsp; the future.&lt;/P&gt;&lt;P&gt;&lt;/P&gt;&lt;P&gt;Thanks,&lt;/P&gt;&lt;P&gt;&lt;/P&gt;&lt;P&gt;B&lt;/P&gt;&lt;DIV&gt; &lt;/DIV&gt;&lt;/BODY&gt;&lt;/HTML&gt;</description>
      <pubDate>Thu, 05 Jun 2025 07:02:54 GMT</pubDate>
      <guid>https://community.netapp.com/t5/Network-and-Storage-Protocols/Aggregate-size-for-small-scale-implementations/m-p/28102#M2514</guid>
      <dc:creator>brasbehlph</dc:creator>
      <dc:date>2025-06-05T07:02:54Z</dc:date>
    </item>
    <item>
      <title>Re: Aggregate size for small scale implementations</title>
      <link>https://community.netapp.com/t5/Network-and-Storage-Protocols/Aggregate-size-for-small-scale-implementations/m-p/28107#M2516</link>
      <description>&lt;HTML&gt;&lt;HEAD&gt;&lt;/HEAD&gt;&lt;BODY&gt;&lt;P&gt;B -&lt;/P&gt;&lt;P&gt;&lt;/P&gt;&lt;P&gt;Yes, you should combine aggr0 and aggr1.&lt;/P&gt;&lt;P&gt;&lt;/P&gt;&lt;P&gt;It makes no sense at all to me that some techs will deploy a dedicated aggr0 for vol0.&lt;/P&gt;&lt;P&gt;It's&amp;nbsp; a waste of disk space.&lt;/P&gt;&lt;P&gt;&lt;/P&gt;&lt;P&gt;&lt;BR /&gt;I hope this response has been helpful to you.&lt;/P&gt;&lt;P&gt;&lt;/P&gt;&lt;P&gt;At your service,&lt;/P&gt;&lt;P&gt;&lt;/P&gt;&lt;P&gt;&lt;BR /&gt;Eugene E. Kashpureff&lt;BR /&gt;&lt;A class="jive-link-email-small" href="mailto:ekashp@kashpureff.org" target="_blank"&gt;ekashp@kashpureff.org&lt;/A&gt;&lt;BR /&gt;NetApp Instructor and Independent Consultant&lt;BR /&gt;&lt;A class="jive-link-external-small" href="http://www.linkedin.com/in/eugenekashpureff" target="_blank"&gt;http://www.linkedin.com/in/eugenekashpureff&lt;/A&gt;&lt;/P&gt;&lt;P&gt;&lt;/P&gt;&lt;P&gt;(P.S. I appreciate points for helpful or correct answers.)&lt;/P&gt;&lt;/BODY&gt;&lt;/HTML&gt;</description>
      <pubDate>Tue, 28 Dec 2010 16:31:41 GMT</pubDate>
      <guid>https://community.netapp.com/t5/Network-and-Storage-Protocols/Aggregate-size-for-small-scale-implementations/m-p/28107#M2516</guid>
      <dc:creator>ekashpureff</dc:creator>
      <dc:date>2010-12-28T16:31:41Z</dc:date>
    </item>
    <item>
      <title>Re: Aggregate size for small scale implementations</title>
      <link>https://community.netapp.com/t5/Network-and-Storage-Protocols/Aggregate-size-for-small-scale-implementations/m-p/28112#M2518</link>
      <description>&lt;HTML&gt;&lt;HEAD&gt;&lt;/HEAD&gt;&lt;BODY&gt;&lt;P&gt;I agree with Eugene. It's a waste of disk space when you create a distinct aggregate to vol0.&lt;/P&gt;&lt;P&gt;IMHO, thinking in future migrations, you shouldn't mix internal disks with shelves disks.&lt;/P&gt;&lt;P&gt;&lt;/P&gt;&lt;P&gt;See you!&lt;/P&gt;&lt;P&gt;&lt;/P&gt;&lt;P&gt;Nasc&lt;/P&gt;&lt;P&gt;NetApp - Enjoy it!&lt;/P&gt;&lt;/BODY&gt;&lt;/HTML&gt;</description>
      <pubDate>Tue, 28 Dec 2010 16:40:28 GMT</pubDate>
      <guid>https://community.netapp.com/t5/Network-and-Storage-Protocols/Aggregate-size-for-small-scale-implementations/m-p/28112#M2518</guid>
      <dc:creator>RodrigoNascimento</dc:creator>
      <dc:date>2010-12-28T16:40:28Z</dc:date>
    </item>
    <item>
      <title>Re: Aggregate size for small scale implementations</title>
      <link>https://community.netapp.com/t5/Network-and-Storage-Protocols/Aggregate-size-for-small-scale-implementations/m-p/28120#M2520</link>
      <description>&lt;HTML&gt;&lt;HEAD&gt;&lt;/HEAD&gt;&lt;BODY&gt;&lt;P&gt;Thanks for the quick response, so based on your reply I would have the following?&lt;/P&gt;&lt;P&gt;&lt;/P&gt;&lt;P&gt;aggr0 = 11 x 1 TB disks + 1 spare located on the unit.&lt;/P&gt;&lt;P&gt;aggr1 = 13 x 1 TB disks + 1 spare located on the shelf&lt;/P&gt;&lt;P&gt;&lt;/P&gt;&lt;P&gt;This would keep the disks separate and maximize the number of spindles and space.&lt;/P&gt;&lt;/BODY&gt;&lt;/HTML&gt;</description>
      <pubDate>Tue, 28 Dec 2010 20:52:31 GMT</pubDate>
      <guid>https://community.netapp.com/t5/Network-and-Storage-Protocols/Aggregate-size-for-small-scale-implementations/m-p/28120#M2520</guid>
      <dc:creator>brasbehlph</dc:creator>
      <dc:date>2010-12-28T20:52:31Z</dc:date>
    </item>
    <item>
      <title>Re: Aggregate size for small scale implementations</title>
      <link>https://community.netapp.com/t5/Network-and-Storage-Protocols/Aggregate-size-for-small-scale-implementations/m-p/28125#M2521</link>
      <description>&lt;HTML&gt;&lt;HEAD&gt;&lt;/HEAD&gt;&lt;BODY&gt;&lt;P&gt;Sounds like a very reasonable plan to me...&lt;/P&gt;&lt;P&gt;&lt;/P&gt;&lt;P&gt;&lt;BR /&gt;I hope this response has been helpful to you.&lt;/P&gt;&lt;P&gt;&lt;/P&gt;&lt;P&gt;At your service,&lt;/P&gt;&lt;P&gt;&lt;/P&gt;&lt;P&gt;&lt;BR /&gt;Eugene E. Kashpureff&lt;BR /&gt;&lt;A class="jive-link-email-small" href="mailto:ekashp@kashpureff.org" target="_blank"&gt;ekashp@kashpureff.org&lt;/A&gt;&lt;BR /&gt;NetApp Instructor and Independent Consultant&lt;BR /&gt;&lt;A class="jive-link-external-small" href="http://www.linkedin.com/in/eugenekashpureff" target="_blank"&gt;http://www.linkedin.com/in/eugenekashpureff&lt;/A&gt;&lt;/P&gt;&lt;P&gt;&lt;/P&gt;&lt;P&gt;(P.S. I appreciate points for helpful or correct answers.)&lt;/P&gt;&lt;/BODY&gt;&lt;/HTML&gt;</description>
      <pubDate>Wed, 29 Dec 2010 02:47:37 GMT</pubDate>
      <guid>https://community.netapp.com/t5/Network-and-Storage-Protocols/Aggregate-size-for-small-scale-implementations/m-p/28125#M2521</guid>
      <dc:creator>ekashpureff</dc:creator>
      <dc:date>2010-12-29T02:47:37Z</dc:date>
    </item>
    <item>
      <title>Re: Aggregate size for small scale implementations</title>
      <link>https://community.netapp.com/t5/Network-and-Storage-Protocols/Aggregate-size-for-small-scale-implementations/m-p/28129#M2522</link>
      <description>&lt;HTML&gt;&lt;HEAD&gt;&lt;/HEAD&gt;&lt;BODY&gt;&lt;P&gt;wasn't the main reason to seperate out Vol0 from the data volumes due to WAFL_iron or WAFL_check runs?&amp;nbsp; my understanding was that during the run the aggregate was unavailable and since the root volume is now contained in that aggregate, with the data, the entire system is now unusable for the run time.&lt;/P&gt;&lt;P&gt;&lt;/P&gt;&lt;P&gt;but, if you had a separate root volume and a data volume needed a run, the other aggregates were still accessible.&amp;nbsp;&amp;nbsp; I do agree that its a waste of space though.&amp;nbsp; and i also agree the risk is small.&amp;nbsp; But i prefer completely informed decisions.&lt;/P&gt;&lt;P&gt;&lt;/P&gt;&lt;P&gt;Am I off base here?&amp;nbsp; or is this old thinking?&lt;/P&gt;&lt;/BODY&gt;&lt;/HTML&gt;</description>
      <pubDate>Wed, 29 Dec 2010 21:20:17 GMT</pubDate>
      <guid>https://community.netapp.com/t5/Network-and-Storage-Protocols/Aggregate-size-for-small-scale-implementations/m-p/28129#M2522</guid>
      <dc:creator>ken_foster</dc:creator>
      <dc:date>2010-12-29T21:20:17Z</dc:date>
    </item>
    <item>
      <title>Re: Aggregate size for small scale implementations</title>
      <link>https://community.netapp.com/t5/Network-and-Storage-Protocols/Aggregate-size-for-small-scale-implementations/m-p/28134#M2523</link>
      <description>&lt;HTML&gt;&lt;HEAD&gt;&lt;/HEAD&gt;&lt;BODY&gt;&lt;P&gt;Ken -&lt;/P&gt;&lt;P&gt;&lt;/P&gt;&lt;P&gt;You're correct with this thinking...&lt;/P&gt;&lt;P&gt;&lt;/P&gt;&lt;P&gt;The consideration is storage utilization versus the very small risk of corrupting the aggregate WAFL file system.&lt;/P&gt;&lt;P&gt;&lt;/P&gt;&lt;P&gt;From the Data ONTAP 'Storage Management Guide' (p 164):&lt;/P&gt;&lt;P&gt;&lt;/P&gt;&lt;P&gt;The following are additional facts and considerations if the root volume is on a disk shelf:&lt;BR /&gt;• Data ONTAP supports two levels of RAID protection, RAID4 and RAID-DP. RAID4 requires a&lt;BR /&gt;minimum of two disks and can protect against single-disk failures. RAID-DP requires a minimum&lt;BR /&gt;of three disks and can protect against double-disk failures. The root volume can exist as the&lt;BR /&gt;traditional stand-alone two-disk volume (RAID4) or three-disk volume (RAID-DP).&lt;BR /&gt;Alternatively, the root volume can exist as a FlexVol volume that is part of a larger hosting&lt;BR /&gt;aggregate.&lt;BR /&gt;• Smaller stand-alone root volumes offer fault isolation from general application storage. On the&lt;BR /&gt;other hand, FlexVol volumes have less impact on overall storage utilization, because they do not&lt;BR /&gt;require two or three disks to be dedicated to the root volume and its small storage requirements.&lt;BR /&gt;• If a FlexVol volume is used for the root volume, file system consistency checks and recovery&lt;BR /&gt;operations could take longer to finish than with the two- or three-disk traditional root volume.&lt;BR /&gt;FlexVol recovery commands work at the aggregate level, so all of the aggregate's disks are&lt;BR /&gt;targeted by the operation. 
One way to mitigate this effect is to use a smaller aggregate with only a&lt;BR /&gt;few disks to house the FlexVol volume containing the root volume.&lt;BR /&gt;&lt;STRONG&gt;• In practice, having the root volume on a FlexVol volume makes a bigger difference with smaller&lt;BR /&gt;capacity storage systems than with very large ones, in which dedicating two disks for the root&lt;BR /&gt;volume has little impact.&lt;BR /&gt;&lt;/STRONG&gt;• For higher resiliency, use a separate two-disk root volume.&lt;BR /&gt;Note: You should convert a two-disk root volume to a RAID-DP volume when performing a&lt;BR /&gt;disk firmware update, because RAID-DP is required for disk firmware updates to be&lt;BR /&gt;nondisruptive. When all disk firmware and Data ONTAP updates have been completed, you&lt;BR /&gt;can convert the root volume back to RAID4.&lt;BR /&gt;For Data ONTAP 7.3 and later, the default RAID type for traditional root volume is RAID-DP.&lt;BR /&gt;If you want to use RAID4 as the raid type for your traditional root volume to minimize the&lt;BR /&gt;number of disks required, you can change the RAID type from RAID-DP to RAID4 by using&lt;BR /&gt;vol options vol0 raidtype raid4.&lt;/P&gt;&lt;P&gt;&lt;/P&gt;&lt;P&gt;&lt;/P&gt;&lt;P&gt;&lt;/P&gt;&lt;P&gt;Given the larger drive sizes we have these days and max sized aggregates as the norm I'd be more&lt;/P&gt;&lt;P&gt;worried about my data being unavailable than the root volume. 
An alternative I've discussed in classes&lt;/P&gt;&lt;P&gt;is to create an alternative root on a second aggregate that could be booted off of...&lt;/P&gt;&lt;P&gt;&lt;/P&gt;&lt;P&gt;For protection against a wafl_iron scenario being painful you may also wish to review an earlier&lt;/P&gt;&lt;P&gt;discussion regarding aggregate snap shots and snap reserve:&lt;/P&gt;&lt;P&gt;&lt;A class="jive-link-external-small" href="http://communities.netapp.com/message/41806" target="_blank"&gt;http://communities.netapp.com/message/41806&lt;/A&gt;&lt;/P&gt;&lt;P&gt;&lt;/P&gt;&lt;P&gt;&lt;/P&gt;&lt;P&gt;I hope this response has been helpful to you.&lt;/P&gt;&lt;P&gt;&lt;/P&gt;&lt;P&gt;At your service,&lt;/P&gt;&lt;P&gt;&lt;/P&gt;&lt;P&gt;&lt;BR /&gt;Eugene E. Kashpureff&lt;BR /&gt;&lt;A class="jive-link-email-small" href="mailto:ekashp@kashpureff.org" target="_blank"&gt;ekashp@kashpureff.org&lt;/A&gt;&lt;BR /&gt;NetApp Instructor and Independent Consultant&lt;BR /&gt;&lt;A class="jive-link-external-small" href="http://www.linkedin.com/in/eugenekashpureff" target="_blank"&gt;http://www.linkedin.com/in/eugenekashpureff&lt;/A&gt;&lt;/P&gt;&lt;P&gt;&lt;/P&gt;&lt;P&gt;(P.S. I appreciate points for helpful or correct answers.)&lt;/P&gt;&lt;/BODY&gt;&lt;/HTML&gt;</description>
      <pubDate>Wed, 29 Dec 2010 21:58:45 GMT</pubDate>
      <guid>https://community.netapp.com/t5/Network-and-Storage-Protocols/Aggregate-size-for-small-scale-implementations/m-p/28134#M2523</guid>
      <dc:creator>ekashpureff</dc:creator>
      <dc:date>2010-12-29T21:58:45Z</dc:date>
    </item>
    <item>
      <title>Re: Aggregate size for small scale implementations</title>
      <link>https://community.netapp.com/t5/Network-and-Storage-Protocols/Aggregate-size-for-small-scale-implementations/m-p/28142#M2524</link>
      <description>&lt;HTML&gt;&lt;HEAD&gt;&lt;/HEAD&gt;&lt;BODY&gt;&lt;P&gt;Good point Ken.&lt;/P&gt;&lt;P&gt;&lt;/P&gt;&lt;P&gt;Is there really a need to have two spares per disk type for this implementation based on best practice or is have 1 ok.&lt;/P&gt;&lt;P&gt;Also, since this filer is not in production I will have some time to test the different configurations. Is there any benchmarks to refer to for comparison of disk write speeds for a mounted nfs export?&lt;/P&gt;&lt;P&gt;I do know there are many variables like iops,cpu,network,raid,... can produce different results, I would like to get some ballpark numbers calculated.&lt;/P&gt;&lt;P&gt;For instance, of the GigE theoretic 125MB/s network throughput, how does ~80MB/s using dd rank?&lt;/P&gt;&lt;P&gt;&lt;/P&gt;&lt;P&gt;Thanks Again,&lt;/P&gt;&lt;P&gt;&lt;/P&gt;&lt;P&gt;B&lt;/P&gt;&lt;/BODY&gt;&lt;/HTML&gt;</description>
      <pubDate>Thu, 30 Dec 2010 22:28:14 GMT</pubDate>
      <guid>https://community.netapp.com/t5/Network-and-Storage-Protocols/Aggregate-size-for-small-scale-implementations/m-p/28142#M2524</guid>
      <dc:creator>brasbehlph</dc:creator>
      <dc:date>2010-12-30T22:28:14Z</dc:date>
    </item>
  </channel>
</rss>

