# Data I/O directories
BASEDIR = "/content/"
# The final DATADIR is BASEDIR / DATADIR unless the data are accessed from GCS;
# when accessing from GCS, the given path is used directly as the DATADIR.
# The training, testing, and validation data directories are then
# DATADIR / "training", DATADIR / "testing", and DATADIR / "validation" respectively.
DATADIR = "datasets"
# OUTPUT_DIR is resolved relative to BASEDIR, i.e. BASEDIR / OUTPUT_DIR
OUTPUT_DIR = "output"
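# Illustrative sketch (not part of the config): how the directory settings above are
# typically combined in Python; the variable names below are assumptions for clarity.
from pathlib import Path

base_dir = Path("/content/")
data_dir = base_dir / "datasets"             # BASEDIR / DATADIR
training_dir = data_dir / "training"         # /content/datasets/training
testing_dir = data_dir / "testing"           # /content/datasets/testing
validation_dir = data_dir / "validation"     # /content/datasets/validation
output_dir = base_dir / "output"             # BASEDIR / OUTPUT_DIR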
MODEL_NAME = "aces"
MODEL_CHECKPOINT_NAME = "modelCheckpoint"
MODEL_DIR_NAME = "unet_v1"
# Auto-generate the model directory name.
# If enabled, it is generated as trial_MODELTYPE + datetime.now() + _v + version
AUTO_MODEL_DIR_NAME = False
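# Illustrative sketch (not part of the config): one way a name following the pattern above
# could be built. The exact timestamp format and the source of the version number are assumptions.
from datetime import datetime

model_type = "unet"
version = 1  # hypothetical version counter
model_dir_name = f"trial_{model_type.upper()}_{datetime.now().strftime('%Y%m%d')}_v{version}"
# e.g. "trial_UNET_20240115_v1"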
# Specify features and labels.
# Keep them in this format (one band name per line) for reading.
FEATURES = "red_before
green_before
blue_before
nir_before
red_during
green_during
blue_during
nir_during"
# For model training, USE_ELEVATION extends FEATURES with "elevation" & "slope".
# USE_S1 extends FEATURES with "vv_asc_before", "vh_asc_before", "vv_asc_during", "vh_asc_during",
# "vv_desc_before", "vh_desc_before", "vv_desc_during", "vh_desc_during".
# If these are not useful and you have other bands in your training data, you can set
# USE_ELEVATION and USE_S1 to False and update FEATURES to include the needed bands.
USE_ELEVATION = True
USE_S1 = True
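# Illustrative sketch (not part of the config): how the newline-separated FEATURES string
# might be expanded into a band list when the flags above are enabled; variable names are
# assumptions for clarity.
FEATURES = "red_before\ngreen_before\nblue_before\nnir_before\nred_during\ngreen_during\nblue_during\nnir_during"
USE_ELEVATION, USE_S1 = True, True

features = [band.strip() for band in FEATURES.split("\n") if band.strip()]
if USE_ELEVATION:
    features += ["elevation", "slope"]
if USE_S1:
    features += ["vv_asc_before", "vh_asc_before", "vv_asc_during", "vh_asc_during",
                 "vv_desc_before", "vh_desc_before", "vv_desc_during", "vh_desc_during"]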
DERIVE_FEATURES = False
ADDED_FEATURES = "ndvi_before
ndvi_during
evi_before
evi_during
ndwi_before
ndwi_during
savi_before
savi_during
msavi_before
msavi_during
mtvi2_before
mtvi2_during
vari_before
vari_during
tgi_before
tgi_during"
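# Illustrative sketch (not part of the config): the ADDED_FEATURES entries are standard
# spectral indices derived from the input bands; for example, NDVI uses the NIR and red bands.
# The NumPy example below is only an assumption about how an index such as ndvi_before
# could be computed, not the project's implementation.
import numpy as np

nir_before = np.array([0.42, 0.55, 0.61])   # example NIR reflectance values
red_before = np.array([0.10, 0.12, 0.15])   # example red reflectance values
ndvi_before = (nir_before - red_before) / (nir_before + red_before)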
LABELS = ["class"]
# Scale of computation
SCALE = 10
# Use a seed for reproducibility
USE_SEED = True
SEED = 42
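# Illustrative sketch (not part of the config): a common way a fixed seed is applied across
# Python, NumPy, and TensorFlow; whether the project seeds all three is an assumption.
import random
import numpy as np
import tensorflow as tf

SEED = 42
random.seed(SEED)
np.random.seed(SEED)
tf.random.set_seed(SEED)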
# Print info of a single dataset while loading
PRINT_INFO = True
# Patch size for training
PATCH_SHAPE = (256, 256)
# Buffer for prediction purposes.
# Half of this extends on each side of a patch.
# If zero, no buffer is applied;
# otherwise specify the size as a tuple (e.g. (72, 72))
KERNEL_BUFFER = 0
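# Illustrative sketch (not part of the config): how a non-zero kernel buffer could expand each
# prediction patch, following the comment above; the values are assumptions for illustration.
PATCH_SHAPE = (256, 256)
KERNEL_BUFFER = (72, 72)                       # example non-zero buffer
x_buffer = KERNEL_BUFFER[0] // 2               # 36 px added on the left and on the right
y_buffer = KERNEL_BUFFER[1] // 2               # 36 px added on the top and on the bottom
buffered_shape = (PATCH_SHAPE[0] + KERNEL_BUFFER[0],
                  PATCH_SHAPE[1] + KERNEL_BUFFER[1])  # (328, 328)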
# Sizes of the training and evaluation datasets.
TRAIN_SIZE = 8531
TEST_SIZE = 1222
VAL_SIZE = 2404
# Specify model training parameters.
BATCH_SIZE = 64
EPOCHS = 5
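# Illustrative sketch (not part of the config): the dataset sizes and batch size above are
# typically used to derive the number of steps per epoch; how the project consumes them is
# an assumption.
TRAIN_SIZE, VAL_SIZE, BATCH_SIZE = 8531, 2404, 64
steps_per_epoch = TRAIN_SIZE // BATCH_SIZE      # 133
validation_steps = VAL_SIZE // BATCH_SIZE       # 37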
RAMPUP_EPOCHS = 25
SUSTAIN_EPOCHS = 20
# Learning rates
USE_ADJUSTED_LR = False
MAX_LR = 1E-3
MID_LR = 3E-4
MIN_LR = 1E-4
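# Illustrative sketch (not part of the config): one plausible ramp-up / sustain / decay
# learning-rate schedule built from the epochs and rates above and wired into Keras with a
# LearningRateScheduler callback. The exact schedule applied when USE_ADJUSTED_LR is True
# is an assumption.
import tensorflow as tf

RAMPUP_EPOCHS, SUSTAIN_EPOCHS = 25, 20
MAX_LR, MID_LR, MIN_LR = 1e-3, 3e-4, 1e-4

def lr_schedule(epoch):
    if epoch < RAMPUP_EPOCHS:
        # ramp linearly from MID_LR up to MAX_LR
        return MID_LR + (MAX_LR - MID_LR) * epoch / RAMPUP_EPOCHS
    if epoch < RAMPUP_EPOCHS + SUSTAIN_EPOCHS:
        # hold at MAX_LR
        return MAX_LR
    # then decay towards MIN_LR
    return max(MIN_LR, MAX_LR * 0.95 ** (epoch - RAMPUP_EPOCHS - SUSTAIN_EPOCHS))

lr_callback = tf.keras.callbacks.LearningRateScheduler(lr_schedule)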
DROPOUT_RATE = 0.2
# Other parameters, with notes
CALLBACK_PARAMETER = "val_loss"
EARLY_STOPPING = False
# Choices are: cnn, dnn, unet
MODEL_TYPE = "unet"
# Needs fixing:
# using GCP AI Platform requires a wrapped model; the wrapped and original models are obtained,
# but inference is not working yet
USE_AI_PLATFORM = False
# The dnn model does not do augmentation
TRANSFORM_DATA = True
# ACTIVATION_FN is auto-generated in the main script:
# if n_class == 1: activation_fn = "sigmoid"
# else: activation_fn = "softmax"
ACTIVATION_FN = "softmax"
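# Illustrative sketch (not part of the config): the activation rule described above, written out;
# n_class here stands for the number of output classes (see OUT_CLASS_NUM below).
n_class = 5
activation_fn = "sigmoid" if n_class == 1 else "softmax"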
OPTIMIZER = "adam"
# Standard or custom losses are available.
# Currently available custom loss: "custom_focal_tversky_loss"
LOSS = "categorical_crossentropy"
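# Illustrative sketch (not part of the config): a generic focal Tversky loss in Keras, shown only
# to indicate what a loss such as "custom_focal_tversky_loss" typically computes; this is not the
# project's implementation, and the alpha/beta/gamma defaults are assumptions.
import tensorflow as tf
from tensorflow.keras import backend as K

def focal_tversky_loss(y_true, y_pred, alpha=0.7, beta=0.3, gamma=0.75, smooth=1e-6):
    y_true = K.flatten(tf.cast(y_true, tf.float32))
    y_pred = K.flatten(tf.cast(y_pred, tf.float32))
    tp = K.sum(y_true * y_pred)
    fn = K.sum(y_true * (1.0 - y_pred))
    fp = K.sum((1.0 - y_true) * y_pred)
    tversky = (tp + smooth) / (tp + alpha * fn + beta * fp + smooth)
    return K.pow(1.0 - tversky, gamma)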
- # "cropland_etc", "rice", "forest", "urban", "others_water_etc"
- OUT_CLASS_NUM = 5
- # Needs fixing
- USE_BEST_MODEL_FOR_INFERENCE = False
- # EE settings
- USE_SERVICE_ACCOUNT = False
- EE_SERVICE_CREDENTIALS = "your-service-credentials.json"
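# Illustrative sketch (not part of the config): how a service account is typically used to
# initialize the Earth Engine Python API with the credentials file above; the service account
# email is a placeholder.
import ee

service_account = "my-service-account@my-project.iam.gserviceaccount.com"  # placeholder
credentials = ee.ServiceAccountCredentials(service_account, "your-service-credentials.json")
ee.Initialize(credentials)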
# Where the prediction output will be stored
EE_OUTPUT_ASSET = "projects/aces/prediction"
# Output prediction name, used for the EE asset, the local output (TF format), and the GCS output (TFRecord format)
OUTPUT_NAME = "prediction_unet"
# GCS settings
GCS_PROJECT = "mygcs-aces"
GCS_BUCKET = "mygcs-aces"
# Prediction image directory
GCS_IMAGE_DIR = "prediction_images"
# Prediction image prefix
GCS_IMAGE_PREFIX = "image_2021"
# Vertex AI settings
# Vertex AI model save directory
GCS_VERTEX_MODEL_SAVE_DIR = "model_save"
# Region
GCS_REGION = "us-central1"
GCS_VERTEX_CONTAINER_IMAGE = "us-docker.pkg.dev/vertex-ai/prediction/tf2-cpu.2-11:latest"
# Choose your machine type here: https://cloud.google.com/vertex-ai/docs/predictions/configure-compute
GCP_MACHINE_TYPE = "c2-standard-60"
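# Illustrative sketch (not part of the config): how the Vertex AI settings above are commonly
# used to upload and deploy a saved model with the google-cloud-aiplatform client; the display
# name, artifact path, and deployment step are assumptions.
from google.cloud import aiplatform

aiplatform.init(project="mygcs-aces", location="us-central1")

model = aiplatform.Model.upload(
    display_name="prediction_unet",                             # assumed display name
    artifact_uri="gs://mygcs-aces/model_save",                  # GCS_BUCKET / GCS_VERTEX_MODEL_SAVE_DIR
    serving_container_image_uri="us-docker.pkg.dev/vertex-ai/prediction/tf2-cpu.2-11:latest",
)
endpoint = model.deploy(machine_type="c2-standard-60")          # GCP_MACHINE_TYPE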