
Amber

Description

Amber is a suite of biomolecular simulation programs.

Homepage: http://ambermd.org/

Documentation: http://ambermd.org/Manuals.php

Access

Amber is licensed software. Please send an e-mail to help@hprc.tamu.edu with a request for access.

Loading the Module

To see all versions of Amber available:

$ module spider amber
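
Once you have identified a version, load it together with its toolchain modules. For example, to load the Amber 22 build used in the Grace examples below:

$ module load GCC/11.2.0 OpenMPI/4.1.1 Amber/22-Python-3.9.6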

Running Amber

It is best to run Amber with a single GPU. Spreading a single calculation across multiple GPUs typically provides little additional speedup.
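
The batch scripts below read an MD control file (amber.in), a parameter/topology file (amber.prmtop), and a coordinate file (amber.inpcrd). As a minimal sketch, an amber.in for a short constant-temperature production run might look like the following; the values are illustrative only, and the full set of &cntrl options is described in the Amber manual.

Short production MD (illustrative values)
 &cntrl
   imin=0, ntx=1, irest=0,
   nstlim=10000, dt=0.002,
   ntc=2, ntf=2, cut=8.0,
   ntt=3, gamma_ln=2.0, temp0=300.0,
   ntpr=500, ntwx=500, ntwr=5000,
 /

Here nstlim and dt set the run length, ntc/ntf enable SHAKE on bonds involving hydrogen, and ntt=3 selects a Langevin thermostat; choose values appropriate for your own system.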

Grace MPI Examples

#!/bin/bash

## NECESSARY JOB SPECIFICATIONS
#SBATCH --job-name=amber         # Set the job name to "amber"
#SBATCH --time=01:30:00          # Set the wall clock limit to 1hr and 30min
#SBATCH --ntasks=48              # Request 48 tasks
#SBATCH --ntasks-per-node=48     # Request 48 tasks per node
#SBATCH --mem=48G                # Request 48GB per node
#SBATCH --output=amber.%j        # Send stdout/err to "amber.[jobID]"

module purge                     # purge all modules
module load GCC/11.2.0 OpenMPI/4.1.1 Amber/22-Python-3.9.6

# See the Amber documentation for a full list of options for pmemd.MPI and the other programs available in the Amber suite.
mpirun -np $SLURM_NPROCS pmemd.MPI -O -i amber.in -o amber.out -p amber.prmtop -c amber.inpcrd

exit
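
To run this example, save the script to a file (the name amber_mpi.slurm below is just a placeholder) and submit it from a Grace login node:

$ sbatch amber_mpi.slurm
$ squeue -u $USER     # check the status of your job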

Grace GPU Examples

#!/bin/bash

## NECESSARY JOB SPECIFICATIONS
#SBATCH --job-name=amber         # Set the job name to "amber"
#SBATCH --time=01:30:00          # Set the wall clock limit to 1hr and 30min
#SBATCH --nodes=1                # Request 1 node
#SBATCH --ntasks=1               # Request 1 task
#SBATCH --ntasks-per-node=1      # Request 1 task per node
#SBATCH --mem=48G                # Request 48GB per node
#SBATCH --partition=gpu          # Request gpu partition
#SBATCH --gres=gpu:rtx:1         # Request GPU type; on Grace it can be t4/rtx/a100
#SBATCH --output=amber.%j        # Send stdout/err to "amber.[jobID]"

module purge                     # purge all modules
module load GCC/11.2.0 OpenMPI/4.1.1 Amber/22-CUDA-11.5-Python-3.9.6

# See the Amber documentation for a full list of options for pmemd.cuda and the other programs available in the Amber suite.
pmemd.cuda -O -i amber.in -o amber.out -p amber.prmtop -c amber.inpcrd

exit
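
pmemd.cuda accepts the same file options as the CPU binaries. A common variant of the command above (the output names here are illustrative) also names the restart, trajectory, and mdinfo files explicitly:

pmemd.cuda -O -i amber.in -o amber.out -p amber.prmtop -c amber.inpcrd \
           -r amber.rst7 -x amber.nc -inf amber.mdinfo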

FASTER MPI Examples

#!/bin/bash

## NECESSARY JOB SPECIFICATIONS
#SBATCH --job-name=amber         # Set the job name to "amber"
#SBATCH --time=01:30:00          # Set the wall clock limit to 1hr and 30min
#SBATCH --ntasks=48              # Request 48 tasks
#SBATCH --ntasks-per-node=48     # Request 48 tasks per node
#SBATCH --mem=48G                # Request 48GB per node
#SBATCH --output=amber.%j        # Send stdout/err to "amber.[jobID]"

module purge                     # purge all modules
module load GCC/10.3.0  OpenMPI/4.1.1 Amber/20-CUDA-11.4.1-Python-3.9.5

# See the Amber documentation for a full list of options for pmemd.MPI and the other programs available in the Amber suite.
mpirun -np $SLURM_NPROCS pmemd.MPI -O -i amber.in -o amber.out -p amber.prmtop -c amber.inpcrd

exit

FASTER GPU Examples

#!/bin/bash

## NECESSARY JOB SPECIFICATIONS
#SBATCH --job-name=amber         # Set the job name to "amber"
#SBATCH --time=01:30:00          # Set the wall clock limit to 1hr and 30min
#SBATCH --nodes=1                # Request 1 node
#SBATCH --ntasks=1               # Request 1 task
#SBATCH --ntasks-per-node=1      # Request 1 task per node
#SBATCH --mem=48G                # Request 48GB per node
#SBATCH --partition=gpu          # Request gpu partition
#SBATCH --gres=gpu:t4:1          # Request GPU type; use a type available on FASTER (e.g., t4 or a100)
#SBATCH --output=amber.%j        # Send stdout/err to "amber.[jobID]"

module purge                     # purge all modules
module load GCC/10.3.0  OpenMPI/4.1.1 Amber/20-CUDA-11.4.1-NCCL-2.11.4-Python-3.9.5

# See the Amber documentation for a full list of options for pmemd.cuda and the other programs available in the Amber suite.
pmemd.cuda -O -i amber.in -o amber.out -p amber.prmtop -c amber.inpcrd

exit