From 5b1c112550f3b07cb935cbcc207fdf0f91594166 Mon Sep 17 00:00:00 2001 From: Peter Upton Date: Wed, 11 Sep 2024 12:07:15 -0500 Subject: [PATCH 1/7] remove grand --- .../allocation-management.md | 2 +- .../not_in_nav/sbank-examples.md | 8 ++--- .../allocation-management/overview.md | 1 - .../project-management/starting-alcf-award.md | 2 +- .../compiling-and-linking-overview.md | 2 +- docs/aurora/data-management/lustre/gecko.md | 6 ++-- docs/aurora/sunspot-to-aurora.md | 8 ++--- .../acdc/eagle-data-sharing.md | 28 ++++++++-------- .../data-transfer/using-globus.md | 3 +- .../filesystem-and-storage/data-storage.md | 7 +--- .../filesystem-and-storage/disk-quota.md | 7 ++-- .../filesystem-and-storage/file-systems.md | 8 ++--- .../applications/gromacs.md | 4 +-- .../applications/lammps.md | 2 +- .../applications/nekrs.md | 2 +- .../applications/openmm.md | 2 +- .../applications/vasp.md | 2 +- .../containers/containers.md | 2 +- docs/polaris/data-science-workflows/julia.md | 12 +++---- .../visualization/paraview-manual-launch.md | 2 +- .../visualization/scripts/server_polaris.pvsc | 4 +-- .../scripts/server_polaris_windows.pvsc | 4 +-- docs/polaris/workflows/libensemble.md | 4 +-- docs/polaris/workflows/mig-compute.md | 2 +- docs/polaris/workflows/parsl.md | 4 +-- .../data-and-software-policies/data-policy.md | 32 +++++++++---------- docs/running-jobs/example-job-scripts.md | 14 ++++---- docs/running-jobs/job-and-queue-scheduling.md | 4 +-- .../dl-frameworks/deepspeed.md | 2 +- .../dl-frameworks/running-pytorch-conda.md | 2 +- .../dl-frameworks/running-tensorflow-conda.md | 2 +- .../not_in_nav/performance-tools/darshan.md | 2 +- .../job-and-queue-scheduling.md | 16 +++++----- mkdocs.yml | 2 +- 34 files changed, 97 insertions(+), 107 deletions(-) diff --git a/docs/account-project-management/allocation-management/allocation-management.md b/docs/account-project-management/allocation-management/allocation-management.md index eab67ff27..54e8d3e93 100644 --- a/docs/account-project-management/allocation-management/allocation-management.md +++ b/docs/account-project-management/allocation-management/allocation-management.md @@ -72,7 +72,7 @@ Note: `hourstomove` must be greater than or equal to the available balance for t Submit jobs to a suballocation. 
Note that the user should be on the suballocation’s user list -`Eg: qsub -l select=10,walltime=30:00,filesystems=grand:home -A -q demand test.sh` +`Eg: qsub -l select=10,walltime=30:00,filesystems=eagle:home -A -q demand test.sh` Note: Once submanagement is enabled for a project allocation, all job submissions must specify the `suballocationID` diff --git a/docs/account-project-management/allocation-management/not_in_nav/sbank-examples.md b/docs/account-project-management/allocation-management/not_in_nav/sbank-examples.md index 1f12d1afa..6f09eab4d 100644 --- a/docs/account-project-management/allocation-management/not_in_nav/sbank-examples.md +++ b/docs/account-project-management/allocation-management/not_in_nav/sbank-examples.md @@ -30,16 +30,16 @@ Totals: Jobs : 3 ``` -### List your project's quota on Grand and/or Eagle File system +### List your project's quota on Eagle File system ``` -> sbank-list-allocations -p ProjectX -r grand +> sbank-list-allocations -p ProjectX -r eagle Allocation Suballocation Start End Resource Project Quota ---------- ------------- ---------- ---------- -------- ----------- ----- - 6687 6555 2020-12-16 2022-01-01 grand ProjectX 1.0 + 6687 6555 2020-12-16 2022-01-01 eagle ProjectX 1.0 Totals: Rows: 1 - Grand: + Eagle: Quota: 1.0 TB > sbank-list-allocations -p ProjectX -r eagle diff --git a/docs/account-project-management/allocation-management/overview.md b/docs/account-project-management/allocation-management/overview.md index 06554aa7a..b588d1947 100644 --- a/docs/account-project-management/allocation-management/overview.md +++ b/docs/account-project-management/allocation-management/overview.md @@ -19,7 +19,6 @@ While requesting an allocation, users can choose from: * Polaris **File System:** -* Grand * Eagle (Community Sharing) ## Policy Information Related to Allocations diff --git a/docs/account-project-management/project-management/starting-alcf-award.md b/docs/account-project-management/project-management/starting-alcf-award.md index fab882cdf..2fc769463 100644 --- a/docs/account-project-management/project-management/starting-alcf-award.md +++ b/docs/account-project-management/project-management/starting-alcf-award.md @@ -19,7 +19,7 @@ Before your project begins, you will receive an email with the following project - **Project Proxies**: Project members designated by PIs that are authorized to add or renew project members on your behalf. - **Allocation System(s) and Allocation Amount**: The approved system(s) and amount of your award in node hours. - **Approved Quota**: The approved amount of disk space for your project directory. -- **File System**: The file system where your project directory will reside. For information on the Grand and Eagle file systems, see Storage and Networking. +- **File System**: The file system where your project directory will reside. For information on the Eagle file system, see Storage and Networking. - **Assigned Catalyst**: INCITE projects will have ALCF staff members that are assigned to the projects who are available to assist the team throughout the duration of the INCITE allocation. - **Allocation Start Date**: The start date of your award. - **Allocation End Date**: The end date of your award. 
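For quick reference, the sketch below strings together the suballocation commands shown above: list a project's Eagle allocations (and suballocation IDs) with `sbank-list-allocations`, then submit a job that charges a specific suballocation. `ProjectX` and the ID `6555` are the placeholder values from the examples above; substitute your own project name and suballocation ID, and note that the submitting user must already be on the suballocation's user list.

```bash
# List Eagle allocations for a project (placeholder project name from the examples above)
sbank-list-allocations -p ProjectX -r eagle

# Submit a job charging a specific suballocation once submanagement is enabled.
# The ID below is a placeholder; the examples above pass it via -A.
SUBALLOCATION_ID=6555
qsub -l select=10,walltime=30:00,filesystems=eagle:home -A "$SUBALLOCATION_ID" -q demand test.sh
```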
diff --git a/docs/aurora/compiling-and-linking/compiling-and-linking-overview.md b/docs/aurora/compiling-and-linking/compiling-and-linking-overview.md index 94cadb070..11a9b2b06 100644 --- a/docs/aurora/compiling-and-linking/compiling-and-linking-overview.md +++ b/docs/aurora/compiling-and-linking/compiling-and-linking-overview.md @@ -6,7 +6,7 @@ If your build system does not require GPUs for the build process, compilation of ## Filesystem -It is helpful to realize that currently there is a single _temporary_ filesystem `gecko` mounted on the Aurora login and compute nodes available to users, where both `home` and `project` spaces reside. It is important to realize that this filesystem is not backed up and users should take care to retain copies of important files (e.g. local resources or ALCF's `grand` and `eagle` filesystems). +It is helpful to realize that currently there is a single _temporary_ filesystem `gecko` mounted on the Aurora login and compute nodes available to users, where both `home` and `project` spaces reside. It is important to realize that this filesystem is not backed up and users should take care to retain copies of important files (e.g. local resources or ALCF's `eagle` filesystem). ## OneAPI Programming Environment diff --git a/docs/aurora/data-management/lustre/gecko.md b/docs/aurora/data-management/lustre/gecko.md index a02b22a08..ca570c49a 100644 --- a/docs/aurora/data-management/lustre/gecko.md +++ b/docs/aurora/data-management/lustre/gecko.md @@ -25,7 +25,7 @@ When you use an SSH proxy, it takes the authentication mechanism from the local ### Transferring files from other ALCF systems -With the bastion pass-through nodes currently used to access both Sunspot and Aurora, users will find it helpful to modify their .ssh/config files on Aurora appropriately to facilitate transfers to Aurora from other ALCF systems. These changes are similar to what Sunspot users may have already implemented. From an Aurora login-node, this readily enables one to transfer files from Sunspot's gila filesystem or one of the production filesystems at ALCF (home, grand, and eagle) mounted on an ALCF system's login node. With the use of ProxyJump below, entering the MobilePass+ or Cryptocard passcode twice will be needed (once for bastion and once for the other resource). A simple example shows the .ssh/config entries for Polaris and the scp command for transferring from Polaris: +With the bastion pass-through nodes currently used to access both Sunspot and Aurora, users will find it helpful to modify their .ssh/config files on Aurora appropriately to facilitate transfers to Aurora from other ALCF systems. These changes are similar to what Sunspot users may have already implemented. From an Aurora login-node, this readily enables one to transfer files from Sunspot's gila filesystem or one of the production filesystems at ALCF (home and eagle) mounted on an ALCF system's login node. With the use of ProxyJump below, entering the MobilePass+ or Cryptocard passcode twice will be needed (once for bastion and once for the other resource). 
A simple example shows the .ssh/config entries for Polaris and the scp command for transferring from Polaris: ``` $ cat .ssh/config @@ -40,7 +40,7 @@ Host polaris.alcf.anl.gov ``` ``` -knight@aurora-uan-0009:~> scp knight@polaris.alcf.anl.gov:/grand/catalyst/proj-shared/knight/test.txt ./ +knight@aurora-uan-0009:~> scp knight@polaris.alcf.anl.gov:/eagle/catalyst/proj-shared/knight/test.txt ./ --------------------------------------------------------------------------- Notice to Users ... @@ -50,5 +50,5 @@ knight@aurora-uan-0009:~> scp knight@polaris.alcf.anl.gov:/grand/catalyst/proj-s ... [Password: knight@aurora-uan-0009:~> cat test.txt -from_polaris grand +from_polaris eagle ``` diff --git a/docs/aurora/sunspot-to-aurora.md b/docs/aurora/sunspot-to-aurora.md index 9586ccf88..30b9cc154 100644 --- a/docs/aurora/sunspot-to-aurora.md +++ b/docs/aurora/sunspot-to-aurora.md @@ -31,7 +31,7 @@ Host polaris.alcf.anl.gov user knight ``` -From an Aurora login-node, this readily enables one to transfer files from Sunspot's gila filesystem or one of the production filesystems at ALCF (home, grand, and eagle). With the use of ProxyJump here, entering the MobilePass+ or Cryptocard passcode twice will be needed (once for bastion and once for the other resource). +From an Aurora login-node, this readily enables one to transfer files from Sunspot's gila filesystem or one of the production filesystems at ALCF (home and eagle). With the use of ProxyJump here, entering the MobilePass+ or Cryptocard passcode twice will be needed (once for bastion and once for the other resource). This simple example transfers a file from Sunspot. @@ -49,10 +49,10 @@ knight@aurora-uan-0009:~> cat test.txt from_sunspot gila ``` -This simple example transfers a file from the grand filesystem via Polaris. +This simple example transfers a file from the eagle filesystem via Polaris. ``` -knight@aurora-uan-0009:~> scp knight@polaris.alcf.anl.gov:/grand/catalyst/proj-shared/knight/test.txt ./ +knight@aurora-uan-0009:~> scp knight@polaris.alcf.anl.gov:/eagle/catalyst/proj-shared/knight/test.txt ./ --------------------------------------------------------------------------- Notice to Users ... @@ -62,7 +62,7 @@ knight@aurora-uan-0009:~> scp knight@polaris.alcf.anl.gov:/grand/catalyst/proj-s ... [Password: knight@aurora-uan-0009:~> cat test.txt -from_polaris grand +from_polaris eagle ``` ## Default software environment diff --git a/docs/data-management/acdc/eagle-data-sharing.md b/docs/data-management/acdc/eagle-data-sharing.md index 045462291..b6dda3c18 100644 --- a/docs/data-management/acdc/eagle-data-sharing.md +++ b/docs/data-management/acdc/eagle-data-sharing.md @@ -1,4 +1,4 @@ -# Sharing Data on Grand/Eagle Using Globus Guest Collections +# Sharing Data on Eagle Using Globus Guest Collections ## Overview Collaborators throughout the scientific community have the ability to write data to and read scientific data from the Eagle filesystem using Globus sharing capability. This capability provides PIs with a natural and convenient storage space for collaborative work. @@ -26,7 +26,7 @@ Type or scroll down to "Argonne LCF" in the "Use your existing organizational lo You will be taken to a familiar-looking page for ALCF login. Enter your ALCF login username and password. 
-## Accessing your Grand/Eagle Project Directory ## +## Accessing your Eagle Project Directory ## @@ -61,8 +61,8 @@ A project PI needs to have an 'active' ALCF account in place to create and share There are multiple ways to Navigate to the Collections tab in "Endpoints": 1. [Click the link to get started](https://app.globus.org/file-manager/collections/05d2c76a-e867-4f67-aa57-76edeb0beda0/shares). It will take you to the Collections tab for Eagle. **OR** -2. Click on 'Endpoints' located in the left panel of the [Globus web app](https://app.globus.org/endpoints). Type "alcf#dtn_eagle" (for Eagle) or "alcf#dtn_grand" (for Grand) in the search box located at the top of the page and click the magnifying glass to search. Click on the Managed Public Endpoint "alcf#dtn_eagle" or "alcf#dtn_grand" from the search results. Click on the Collections tab. **OR** -3. Click on 'File Manager' located in the left panel of the Globus web app. Search for 'alcf#dtn_Eagle' (or "alcf#dtn_grand") and select it in the Collection field. Select your project directory or a sub directory that you would like to share with collaborators as a Globus guest collection. Click on 'Share' on the right side of the panel, which will take you to the Collections tab. +2. Click on 'Endpoints' located in the left panel of the [Globus web app](https://app.globus.org/endpoints). Type "alcf#dtn_eagle" (for Eagle) or "alcf#dtn_eagle" (for Eagle) in the search box located at the top of the page and click the magnifying glass to search. Click on the Managed Public Endpoint "alcf#dtn_eagle" or "alcf#dtn_eagle" from the search results. Click on the Collections tab. **OR** +3. Click on 'File Manager' located in the left panel of the Globus web app. Search for 'alcf#dtn_Eagle' (or "alcf#dtn_eagle") and select it in the Collection field. Select your project directory or a sub directory that you would like to share with collaborators as a Globus guest collection. Click on 'Share' on the right side of the panel, which will take you to the Collections tab. **Note:** When you select an endpoint to transfer data to/from, you may be asked to authenticate with that endpoint. Follow the instructions on screen to activate the endpoint and to authenticate. You may also have to provide Authentication/Consent for the Globus web app to manage collections on this endpoint @@ -142,8 +142,8 @@ Globus supports setting permissions at a folder level, so there is no need to cr
Create new group
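PIs who prefer to script these sharing steps can do the equivalent with the Globus CLI. This is only a sketch under the assumption that the `globus-cli` package is installed and `globus login` has been run; the guest collection UUID and collaborator identity below are placeholders, and the exact option names should be confirmed against `globus endpoint permission create --help` for your CLI version.

```bash
# Placeholder UUID and identity: grant read-only access at the root of an existing guest collection
GUEST_COLLECTION="your-guest-collection-uuid"
globus endpoint permission create --permissions r \
    --identity collaborator@example.org "$GUEST_COLLECTION:/"

# Review the access rules currently attached to the guest collection
globus endpoint permission list "$GUEST_COLLECTION"
```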
-## Transferring data from Grand/Eagle -Log in to [Globus](https://app.globus.org) using your ALCF credentials. After authenticating, you will be taken to the Globus File Manager tab. In the 'Collection' box, type the name of Eagle/Grand managed endpoint (```alcf#dtn_eagle``` or ```alcf#dtn_grand```). Navigate to the folder/file you want to transfer. HTTPS access (read-only) is enabled so you can download files by clicking the "Download" button. +## Transferring data from Eagle +Log in to [Globus](https://app.globus.org) using your ALCF credentials. After authenticating, you will be taken to the Globus File Manager tab. In the 'Collection' box, type the name of Eagle managed endpoint (```alcf#dtn_eagle```) Navigate to the folder/file you want to transfer. HTTPS access (read-only) is enabled so you can download files by clicking the "Download" button. Click on 'Download' to download the required file. @@ -241,7 +241,7 @@ Alternatively, you can encrypt the files before transfer using any method on you ## FAQs ### General FAQs: -**1. What are Eagle and Grand file systems?** +**1. What is the Eagle file system?** They are Lustre file systems residing on an HPE ClusterStor E1000 platform equipped with 100 Petabytes of usable capacity across 8480 disk drives. Each ClusterStor platform also provides 160 Object Storage Targets and 40 Metadata Targets with an aggregate data transfer rate of 650GB/s. @@ -249,7 +249,7 @@ They are Lustre file systems residing on an HPE ClusterStor E1000 platform equip - Guest collections: A Guest collection is a logical construct that a PI sets up on their project directory in Globus that makes it accessible to collaborators. The PI creates a guest collection at or below their project and shares it with the Globus account holders. - Shared collection: A guest collection becomes a shared collection when it is shared with a user/group. -- Mapped Collections: Mapped Collections are created by the endpoint administrators. In the case of Eagle/Grand, these are created by ALCF. +- Mapped Collections: Mapped Collections are created by the endpoint administrators. In the case of Eagle, these are created by ALCF. **3. Who can create Guest collections?** @@ -289,18 +289,18 @@ Yes. The PI needs to have an 'active' ALCF account in place to create and share **3. What endpoint should the PI use?** -```alcf#dtn_eagle``` (project on Eagle) or ```alcf#dtn_eagle``` (project on Grand) +```alcf#dtn_eagle``` (project on Eagle) **4. What are the actions a PI can perform?** - Create and delete guest collections, groups - Create, delete and share the data with ALCF users and external collaborators - Specify someone as a Proxy (Access Manager) for the guest collections -- Transfer data between the guest collection on Eagle/Grand and other Globus endpoints/collections +- Transfer data between the guest collection on Eagle and other Globus endpoints/collections **5. How can a PI specify someone as a Proxy on the Globus side?** -Go to alcf#dtn_eagle (or alcf#dtn_grand) -> collections -> shared collection -> roles -> select 'Access Manager' +Go to alcf#dtn_eagle (or alcf#dtn_eagle) -> collections -> shared collection -> roles -> select 'Access Manager'
![Roles](files/roles.png){ width="700" } @@ -317,7 +317,7 @@ Go to alcf#dtn_eagle (or alcf#dtn_grand) -> collections -> shared collection -> 1. PI requests a compute or data-only allocation project. 2. Once the request is approved, ALCF staff sets up a project, unixgroup, and project directory. 3. A Globus sharing policy is created for the project with appropriate access controls, provided the PI has an active ALCF account. -4. PI creates a guest collection for the project, using the Globus mapped collection for the file system (alcf#dtn_eagle or alcf#dtn_grand). +4. PI creates a guest collection for the project, using the Globus mapped collection for the file system (alcf#dtn_eagle) - **Note:** PI needs to have an active ALCF Account and will need to log in to Globus using their ALCF credentials. - If PI has an existing Globus account, it needs to be linked to their ALCF account. @@ -326,7 +326,7 @@ Go to alcf#dtn_eagle (or alcf#dtn_grand) -> collections -> shared collection -> **7. How can project members with ALCF accounts access the project directory via Globus?** - Users that have active ALCF accounts and are part of the project in the ALCF Account and Project Management system will automatically have access to the project directory which they can access by browsing the Globus endpoint ```alcf#dtn_eagle or alcf#dtn_grand```. If they want to access the files using the Globus guest collection set up by the PI, the PI will need to explicitly give them permissions to that guest collection. The purpose of Globus guest collections is to share the data with collaborators that don't have ALCF accounts or are not part of the project in the ALCF Account and Project Management system. + Users that have active ALCF accounts and are part of the project in the ALCF Account and Project Management system will automatically have access to the project directory which they can access by browsing the Globus endpoint ```alcf#dtn_eagle``` . If they want to access the files using the Globus guest collection set up by the PI, the PI will need to explicitly give them permissions to that guest collection. The purpose of Globus guest collections is to share the data with collaborators that don't have ALCF accounts or are not part of the project in the ALCF Account and Project Management system. **8. Who has the permissions to create a guest collection?** @@ -386,7 +386,7 @@ No. An access manager cannot create a collection, only a PI can do that. The acc **7. Can an Access Manager leave a globus group or withdraw membership request for collaborators?** -Yes.[Go to alcf#dtn_eagle (or alcf#dtn_grand)-> Groups > group_name -> Members -> click on specific user -> Role & Status -> Set the appropriate status] +Yes.[Go to alcf#dtn_eagle -> Groups > group_name -> Members -> click on specific user -> Role & Status -> Set the appropriate status]
![Permission denied](files/roles.png){ width="700" } diff --git a/docs/data-management/data-transfer/using-globus.md b/docs/data-management/data-transfer/using-globus.md index 41d695c47..4529b7716 100644 --- a/docs/data-management/data-transfer/using-globus.md +++ b/docs/data-management/data-transfer/using-globus.md @@ -8,14 +8,13 @@ Basic documentation for getting started with Globus can be found at the followin [https://docs.globus.org/how-to/](https://docs.globus.org/how-to/) ## Data Transfer Node -Several data transfer nodes (DTNs) for `/home`, Grand, Eagle, and HPSS are available to ALCF users, allowing users to perform wide and local area data transfers. Access to the DTNs is provided via the following Globus endpoints. +Several data transfer nodes (DTNs) for `/home`, Eagle, and HPSS are available to ALCF users, allowing users to perform wide and local area data transfers. Access to the DTNs is provided via the following Globus endpoints. ## ALCF Globus Endpoints The Globus endpoint and the path to use depends on where your data resides. If your data is on: - `/home` which is where your home directory resides: `alcf#dtn_home` for accessing `/home` (i.e. home directories on swift-home filesystem). Use the path `/` - HPSS: `alcf#dtn_hpss` -- Grand filesystem: `alcf#dtn_grand` for accessing `/lus/grand/projects` or `/grand` (i.e. project directories on Grand filesystem). Use the path `/grand/` - Eagle filesystem: `alcf#dtn_eagle` for accessing /`lus/eagle/projects` or `/eagle` (i.e project directories on Eagle filesystem). Use the path `/eagle/` After [registering](https://app.globus.org/), simply use the appropriate ALCF endpoint, as well as other sources or destinations. Use your ALCF credentials (your OTP generated by the CryptoCARD token with PIN or Mobilepass app) to activate the ALCF endpoint. diff --git a/docs/data-management/filesystem-and-storage/data-storage.md b/docs/data-management/filesystem-and-storage/data-storage.md index bfba411e7..9f2fe27fe 100644 --- a/docs/data-management/filesystem-and-storage/data-storage.md +++ b/docs/data-management/filesystem-and-storage/data-storage.md @@ -6,11 +6,6 @@ The ALCF operates a number of file systems that are mounted globally across all ### Home A Lustre file system residing on a DDN AI-400X NVMe Flash platform. It has 24 NVMe drives with 7 TB each with 123 TB of usable space. It provides 8 Object Storage Targets and 4 Metadata Targets. -### Grand -A Lustre file system residing on an HPE ClusterStor E1000 platform equipped with 100 Petabytes of usable capacity across 8480 disk drives. This ClusterStor platform provides 160 Object Storage Targets and 40 Metadata Targets with an aggregate data transfer rate of 650GB/s. The primary use of grand is compute campaign storage. - -Also see [ALCF Data Policies](https://www.alcf.anl.gov/support-center/facility-policies/data-policy) and [Data Transfer](../data-transfer/using-globus.md) - ### Eagle A Lustre file system residing on an HPE ClusterStor E1000 platform equipped with 100 Petabytes of usable capacity across 8480 disk drives. This ClusterStor platform provides 160 Object Storage Targets and 40 Metadata Targets with an aggregate data transfer rate of 650GB/s. The primary use of eagle is data sharing with the research community. Eagle has community sharing community capabilities which allow PIs to [share their project data with external collabortors](../acdc/eagle-data-sharing.md) using Globus. Eagle can also be used for compute campaign storage. 
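The ALCF endpoints listed above can also be driven from the command line rather than the web app. The sketch below assumes the `globus-cli` package is installed and `globus login` has been completed; the destination UUID, project name, and file paths are placeholders, and the source UUID is whatever the endpoint search returns for `alcf#dtn_eagle`.

```bash
# Look up the UUID of the managed Eagle endpoint by name
globus endpoint search "alcf#dtn_eagle"

SRC="eagle-endpoint-uuid-from-search"    # placeholder: UUID returned by the search above
DST="your-destination-endpoint-uuid"     # placeholder: an endpoint you can write to
globus ls "$SRC:/eagle/"                 # browse project directories on Eagle
globus transfer "$SRC:/eagle/MyProject/data.tar" "$DST:/incoming/data.tar" --label "eagle-pull"
```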
@@ -36,7 +31,7 @@ HSI can be invoked by simply entering hsi at your normal shell prompt. Once auth You may enter "help" to display a brief description of available commands. -If archiving from or retrieving to grand or eagle you must disable the Transfer Agent. -T off +If archiving from or retrieving to eagle you must disable the Transfer Agent. -T off Example archive ``` diff --git a/docs/data-management/filesystem-and-storage/disk-quota.md b/docs/data-management/filesystem-and-storage/disk-quota.md index a64ace9ea..222ac22c2 100644 --- a/docs/data-management/filesystem-and-storage/disk-quota.md +++ b/docs/data-management/filesystem-and-storage/disk-quota.md @@ -1,6 +1,6 @@ # Disk Quota ## Overview -Disk quotas are enabled on project directories. ALCF's HPC systems use the agile-home file system located at `/lus/agile/home` where quotas are also enforced. Details on the home file system are listed in [file systems](file-systems.md). Following are descriptions and examples for the home file system, as well as the Grand and Eagle project filesystems. +Disk quotas are enabled on project directories. ALCF's HPC systems use the agile-home file system located at `/lus/agile/home` where quotas are also enforced. Details on the home file system are listed in [file systems](file-systems.md). Following are descriptions and examples for the home file system, as well as the Eagle project filesystems. ## Home Directory Quotas By default, each home directory is assigned a default of 50GB. File ownership determines disk space usage. @@ -14,7 +14,7 @@ userX User /lus/agile 44.13G 50.00G ``` ## Project Directory Quotas -Grand and Eagle. The amount of data stored under /lus//projects/PROJECT_NAME cannot exceed the approved project quota limit approved during the allocation period. The total data usage under the project directory is used to calculate the disk quota. +The amount of data stored under /lus/grand/projects/PROJECT_NAME cannot exceed the approved project quota limit approved during the allocation period. The total data usage under the project directory is used to calculate the disk quota. To check project quota usage on the file systems, enter this command: ``` @@ -24,12 +24,11 @@ Lustre : Current Project Quota information for projects you're a member of: Name Type Filesystem Used Quota Grace ============================================================================================================== -projectZ Project grand 8k 1000T - projectX Project eagle 1.87T 1000T - ``` ## Requesting a New Eagle Allocation -For requesting a new project having an allocation on Eagle (with or without a compute allocation), please make a request by filling out the [Director's Discretionary allocation form](https://accounts.alcf.anl.gov/allocationRequests). Note that all new compute projects will have Grand as the default file system. +For requesting a new project having an allocation on Eagle (with or without a compute allocation), please make a request by filling out the [Director's Discretionary allocation form](https://accounts.alcf.anl.gov/allocationRequests). Note that all new compute projects will have the default file system. ## Quota Increases If you need a quota increase for Director's Discretionary allocations, please make a request by filling out the [Director's Discretionary allocation form](https://accounts.alcf.anl.gov/allocationRequests). 
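When the quota report above shows a project close to its limit, a plain `du` pass is a quick way to see which subdirectories dominate the usage before requesting an increase. The project path below is a placeholder.

```bash
# Per-directory usage under a project directory (placeholder path), sorted smallest to largest
du -h --max-depth=1 /eagle/MyProject | sort -h
```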
diff --git a/docs/data-management/filesystem-and-storage/file-systems.md b/docs/data-management/filesystem-and-storage/file-systems.md index 89ee53529..0b4575e49 100644 --- a/docs/data-management/filesystem-and-storage/file-systems.md +++ b/docs/data-management/filesystem-and-storage/file-systems.md @@ -1,6 +1,6 @@ # ALCF File Systems -Our HPC systems have discrete file systems for project data: Grand and Eagle. -Grand and Eagle are 100 PB Lustre file systems mounted as /grand and /eagle respectively. +Our HPC systems store project data in a file system called Eagle. +Eagle is a Lustre file system mounted as /eagle. For more information on the Lustre file system, here is a document on Lustre File Striping Basics. * [Lustre File Striping Basics](https://www.alcf.anl.gov/support-center/training-assets/file-systems-and-io-performance) @@ -14,7 +14,6 @@ The agile-home file system is regularly backed up to tape. The data file system | Name | Accessible From | Type | Path | Production | Backed-up | Usage | |--------------------------------------|----------|--------|---------------------------------------------------------------------------------------|-----------------------------------------------|-----------|------------------------------------------------------------------------| | agile-home | Polaris | Lustre | /home or /lus/agile/home | Yes | Yes | General use | -| Grand | Polaris | Lustre | /grand or /lus/grand/projects | Yes | No | Intensive job output, large files | | Eagle | Polaris | Lustre | /eagle or /lus/eagle/projects | Yes | No | Community sharing via Globus;
Intensive job output, large files |
| Node SSD
(Compute node only) | Polaris | xfs | /local/scratch (Polaris) | Yes | No | Local node scratch during run | @@ -59,9 +58,8 @@ setfacl -R -m u:gilgamesh:rX /home/username/subdirectoryname ### Project Directories -- Directories on Grand or Eagle are created when an allocation (INCITE, ALCC, Discretionary, etc.) is awarded. Eagle directories can be created as stand-alone allocations. Use the [allocation request form](https://accounts.alcf.anl.gov/allocationRequests) to submit requests for an allocation on Eagle. +- Directories on Eagle are created when an allocation (INCITE, ALCC, Discretionary, etc.) is awarded. Eagle directories can be created as stand-alone allocations. Use the [allocation request form](https://accounts.alcf.anl.gov/allocationRequests) to submit requests for an allocation on Eagle. - Directory paths: - - Grand: /grand or /lus/grand/projects - Eagle: /eagle or /lus/eagle/projects These project spaces do not have user quotas but a directory quota, meaning that ALL files contained within a project directory, regardless of the username, cannot exceed the disk space allocation granted to the project. For more information on quotas, see the [Disk Quota page](disk-quota.md). diff --git a/docs/polaris/applications-and-libraries/applications/gromacs.md b/docs/polaris/applications-and-libraries/applications/gromacs.md index da5ab7025..0826ec8be 100644 --- a/docs/polaris/applications-and-libraries/applications/gromacs.md +++ b/docs/polaris/applications-and-libraries/applications/gromacs.md @@ -40,7 +40,7 @@ A sample pbs script follows that will run GROMACS on two nodes, using 4 MPI rank #PBS -l walltime=0:30:00 #PBS -q debug #PBS -A PROJECT -#PBS -l filesystems=home:grand:eagle +#PBS -l filesystems=home:eagle cd ${PBS_O_WORKDIR} @@ -54,4 +54,4 @@ mpirun --np 8 /soft/applications/Gromacs/gromacs-2022.1/gmx_mpi \ -dlb yes -resethway -pin on -v deffnm step5_1 -g test.log ``` -We strongly suggest that users try combinations of different numbers of nodes, MPI ranks per node, number of GPU tasks/devices, GPU task decomposition between nonbonded and PME kernels, and OMP threads per rank to find the optimal throughput for their particular workload. \ No newline at end of file +We strongly suggest that users try combinations of different numbers of nodes, MPI ranks per node, number of GPU tasks/devices, GPU task decomposition between nonbonded and PME kernels, and OMP threads per rank to find the optimal throughput for their particular workload. 
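Since the paragraph above recommends experimenting with the work decomposition, here is an illustrative sketch of one way to do so inside the same job script: sweep the number of OpenMP threads per rank while holding the total rank count fixed. `-ntomp` and `-npme` are standard `gmx mdrun` options; the specific values, input name, and log file names below are placeholders, not a recommended configuration.

```bash
# Illustrative sweep: 8 MPI ranks total (as in the script above), varying OpenMP threads per rank
for nt in 2 4 8; do
    export OMP_NUM_THREADS=$nt
    mpirun --np 8 /soft/applications/Gromacs/gromacs-2022.1/gmx_mpi mdrun \
        -ntomp $nt -npme 1 -nb gpu -pme gpu \
        -dlb yes -resethway -pin on -v -deffnm step5_1 -g test_ntomp${nt}.log
done
```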
diff --git a/docs/polaris/applications-and-libraries/applications/lammps.md b/docs/polaris/applications-and-libraries/applications/lammps.md index 667188f0d..6e8e638d7 100644 --- a/docs/polaris/applications-and-libraries/applications/lammps.md +++ b/docs/polaris/applications-and-libraries/applications/lammps.md @@ -140,7 +140,7 @@ An example submission script for running a 64-node KOKKOS-enabled LAMMPS executa #PBS -l select=64:system=polaris #PBS -l place=scatter #PBS -l walltime=0:15:00 -#PBS -l filesystems=home:grand:eagle +#PBS -l filesystems=home:eagle #PBS -q prod #PBS -A Catalyst diff --git a/docs/polaris/applications-and-libraries/applications/nekrs.md b/docs/polaris/applications-and-libraries/applications/nekrs.md index 62106fff6..1df6c7031 100644 --- a/docs/polaris/applications-and-libraries/applications/nekrs.md +++ b/docs/polaris/applications-and-libraries/applications/nekrs.md @@ -150,7 +150,7 @@ echo "#PBS -A $PROJ_ID" >>$SFILE echo "#PBS -N nekRS_$case" >>$SFILE echo "#PBS -q $QUEUE" >>$SFILE echo "#PBS -l walltime=$time" >>$SFILE -echo "#PBS -l filesystems=home:eagle:grand" >>$SFILE +echo "#PBS -l filesystems=home:eagle" >>$SFILE echo "#PBS -l select=$nodes:system=polaris" >>$SFILE echo "#PBS -l place=scatter" >>$SFILE echo "#PBS -k doe" >>$SFILE #write directly to the destination, doe=direct, output, error diff --git a/docs/polaris/applications-and-libraries/applications/openmm.md b/docs/polaris/applications-and-libraries/applications/openmm.md index 576273d02..677b2091c 100644 --- a/docs/polaris/applications-and-libraries/applications/openmm.md +++ b/docs/polaris/applications-and-libraries/applications/openmm.md @@ -44,7 +44,7 @@ A sample pbs script follows that will run OpenMM benchmark on one node. #PBS -l walltime=0:30:00 #PBS -q debug #PBS -A PROJECT -#PBS -l filesystems=home:grand:eagle +#PBS -l filesystems=home:eagle cd ${PBS_O_WORKDIR} diff --git a/docs/polaris/applications-and-libraries/applications/vasp.md b/docs/polaris/applications-and-libraries/applications/vasp.md index 263b3e7ae..e59abbf98 100644 --- a/docs/polaris/applications-and-libraries/applications/vasp.md +++ b/docs/polaris/applications-and-libraries/applications/vasp.md @@ -157,7 +157,7 @@ An example of a submission script could be found here ` /soft/applications/vasp/ #PBS -l select=1:system=polaris #PBS -l place=scatter #PBS -l walltime=0:30:00 -#PBS -l filesystems=home:grand:eagle +#PBS -l filesystems=home:eagle #PBS -q debug #PBS -A MYPROJECT diff --git a/docs/polaris/data-science-workflows/containers/containers.md b/docs/polaris/data-science-workflows/containers/containers.md index c4ee96c91..9e7a08285 100644 --- a/docs/polaris/data-science-workflows/containers/containers.md +++ b/docs/polaris/data-science-workflows/containers/containers.md @@ -50,7 +50,7 @@ To run a container on Polaris you can use the submission script described [here] #PBS -q debug #PBS -l place=scatter #PBS -l walltime=0:30:00 -#PBS -l filesystems=home:grand +#PBS -l filesystems=home:eagle #PBS -A cd ${PBS_O_WORKDIR} echo $CONTAINER diff --git a/docs/polaris/data-science-workflows/julia.md b/docs/polaris/data-science-workflows/julia.md index f74ff6cbd..e30880ee6 100644 --- a/docs/polaris/data-science-workflows/julia.md +++ b/docs/polaris/data-science-workflows/julia.md @@ -52,13 +52,13 @@ The Julia built-in package manager allows you to create a project and enable project-specific dependencies. Julia manages packages in the Julia depot located by default in `~/.julia`. 
However, that NFS filesystem is not meant for high-speed access. Therefore, this Julia depot folder should be located on a -fast filesystem of your choice (grand, eagle). The Julia depot directory is +fast filesystem of your choice. The Julia depot directory is set via the environment variable `JULIA_DEPOT_PATH`. For example, you can set -the Julia depot to a directory on Polaris grand filesystem by adding the following line +the Julia depot to a directory on Polaris eagle filesystem by adding the following line to your `~/.bashrc` file: ```bash -export JULIA_DEPOT_PATH=/grand/$PROJECT/$USER/julia_depot +export JULIA_DEPOT_PATH=/eagle/$PROJECT/$USER/julia_depot ``` ## Programming Julia on Polaris @@ -131,7 +131,7 @@ $ julia --project -e 'using Pkg; Pkg.add("CUDA")' The GPUs are not currently usable on the Polaris login nodes, so one can confirm the version of CUDA being used by testing in a batch or interactive job on a compute node. ``` -$ qsub -I -l select=1,walltime=1:00:00,filesystems=home:grand:eagle -A [PROJECT] -q debug +$ qsub -I -l select=1,walltime=1:00:00,filesystems=home:eagle -A [PROJECT] -q debug $ julia --project -e "using CUDA; CUDA.versioninfo()" CUDA runtime 12.4, artifact installation @@ -330,7 +330,7 @@ This example can be run on Polaris with the following job submission script: #PBS -l select=1:system=polaris #PBS -l place=scatter #PBS -l walltime=0:30:00 -#PBS -l filesystems=home:grand +#PBS -l filesystems=home:eagle #PBS -q debug #PBS -A PROJECT @@ -364,7 +364,7 @@ module load craype-accel-nvidia80 module load cray-hdf5-parallel export PATH=/home/knight/.juliaup/bin:${PATH} -export JULIA_DEPOT_PATH=/grand/catalyst/proj-shared/knight/polaris/julia/depot +export JULIA_DEPOT_PATH=/eagle/catalyst/proj-shared/knight/polaris/julia/depot export JULIA_HDF5_PATH=$HDF5_DIR diff --git a/docs/polaris/visualization/paraview-manual-launch.md b/docs/polaris/visualization/paraview-manual-launch.md index 03bcee8e9..aa175a89a 100644 --- a/docs/polaris/visualization/paraview-manual-launch.md +++ b/docs/polaris/visualization/paraview-manual-launch.md @@ -27,7 +27,7 @@ You will use these settings when establishing the connection. You can launch an interactive session on Polaris compute nodes with the following command (adjust parameters as needed to match your allocation, desired number of nodes, queue, walltime, and filesystems): ```shell -qsub -l walltime=01:00:00 -l select=2 -A yourallocation -q debug -I -l filesystems=home:grand +qsub -l walltime=01:00:00 -l select=2 -A yourallocation -q debug -I -l filesystems=home:eagle ``` When the job starts you will receive a prompt on your head node like this: diff --git a/docs/polaris/visualization/scripts/server_polaris.pvsc b/docs/polaris/visualization/scripts/server_polaris.pvsc index e94f5af7c..9dc023015 100644 --- a/docs/polaris/visualization/scripts/server_polaris.pvsc +++ b/docs/polaris/visualization/scripts/server_polaris.pvsc @@ -39,7 +39,7 @@
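Returning to the Julia environment setup covered earlier, the commands below simply collect those steps in one place. They restate what is shown above: `$PROJECT` is a placeholder for your project name, the depot path follows the convention from the example `~/.bashrc` line, and the GPU check must run on a compute node because GPUs are not visible on the login nodes.

```bash
# Keep the Julia depot on the Eagle project filesystem rather than NFS home (add to ~/.bashrc)
export JULIA_DEPOT_PATH=/eagle/$PROJECT/$USER/julia_depot

# From a login node: install CUDA.jl into the project environment
julia --project -e 'using Pkg; Pkg.add("CUDA")'

# Request an interactive compute node, then verify the CUDA stack from inside that session
qsub -I -l select=1,walltime=1:00:00,filesystems=home:eagle -A $PROJECT -q debug
julia --project -e 'using CUDA; CUDA.versioninfo()'   # run this on the compute node
```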