7464aca44e73f72998c411e999e085e4fdfd7568,tests/models/test_cpu.py,,test_cpu_slurm_save_load,#Any#Any#,29

Before Change


@pytest.mark.parametrize("enable_pl_optimizer", [False, True])
def test_cpu_slurm_save_load(enable_pl_optimizer, tmpdir):
    """Verify model save/load/checkpoint on CPU."""
    hparams = EvalModelTemplate.get_default_hparams()
    model = EvalModelTemplate(**hparams)

    # logger file to get meta
    logger = tutils.get_default_logger(tmpdir)
    version = logger.version

    # fit model
    trainer = Trainer(
        default_root_dir=tmpdir,
        max_epochs=1,
        logger=logger,
        limit_train_batches=0.2,
        limit_val_batches=0.2,
        callbacks=[ModelCheckpoint(dirpath=tmpdir)],
        enable_pl_optimizer=enable_pl_optimizer,
    )
    result = trainer.fit(model)
    real_global_step = trainer.global_step

    # training complete
    assert result == 1, "cpu model failed to complete"

    # predict with trained model before saving
    # make a prediction
    dataloaders = model.test_dataloader()
    if not isinstance(dataloaders, list):
        dataloaders = [dataloaders]

    for dataloader in dataloaders:
        for batch in dataloader:
            break

    x, y = batch
    x = x.view(x.size(0), -1)

    model.eval()
    pred_before_saving = model(x)

    # test HPC saving
    # simulate snapshot on slurm
    saved_filepath = trainer.checkpoint_connector.hpc_save(trainer.weights_save_path, logger)
    assert os.path.exists(saved_filepath)

    # new logger file to get meta
    logger = tutils.get_default_logger(tmpdir, version=version)

    trainer = Trainer(
        default_root_dir=tmpdir,
        max_epochs=1,
        logger=logger,
        callbacks=[ModelCheckpoint(dirpath=tmpdir)],
        enable_pl_optimizer=enable_pl_optimizer,
    )
    model = EvalModelTemplate(**hparams)

    # set the epoch start hook so we can predict before the model does the full training
    def assert_pred_same():
        assert trainer.global_step == real_global_step and trainer.global_step > 0

After Change


    # new logger file to get meta
    logger = tutils.get_default_logger(tmpdir, version=version)

    model = BoringModel()

    class _StartCallback(Callback):
        # set the epoch start hook so we can predict before the model does the full training
        def on_train_epoch_start(self, trainer, model):
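The After Change snippet is cut off at the hook signature. A minimal sketch of how the callback-based check might continue, assuming it mirrors the old assert_pred_same check on trainer.global_step (the hook body, the _StartCallback registration, and the second Trainer setup below are assumptions for illustration, not the recorded diff; Callback and Trainer are the standard pytorch_lightning classes):

            # hypothetical body: verify the HPC checkpoint restored the step counter
            assert trainer.global_step == real_global_step and trainer.global_step > 0

    # register the hypothetical callback on a fresh Trainer so the assertion runs
    # before the restored model trains further
    trainer = Trainer(
        default_root_dir=tmpdir,
        max_epochs=1,
        logger=logger,
        callbacks=[_StartCallback(), ModelCheckpoint(dirpath=tmpdir)],
    )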
In pattern: SUPERPATTERN

Frequency: 5

Non-data size: 6

Instances


Project Name: williamFalcon/pytorch-lightning
Commit Name: 7464aca44e73f72998c411e999e085e4fdfd7568
Time: 2021-01-07
Author: gianluca@scarpellini.dev
File Name: tests/models/test_cpu.py
Class Name:
Method Name: test_cpu_slurm_save_load


Project Name: williamFalcon/pytorch-lightning
Commit Name: 8e9a026bc34d8409faa572a7144c2d96a7c039ed
Time: 2021-02-11
Author: rohitgr1998@gmail.com
File Name: tests/models/test_restore.py
Class Name:
Method Name: test_dp_resume


Project Name: williamFalcon/pytorch-lightning
Commit Name: f4cc7451a94010a572480c43ad5f0af7ad52cd21
Time: 2021-03-10
Author: eliacereda@gmail.com
File Name: tests/trainer/test_config_validator.py
Class Name:
Method Name: test_wrong_train_setting


Project Name: williamFalcon/pytorch-lightning
Commit Name: f4cc7451a94010a572480c43ad5f0af7ad52cd21
Time: 2021-03-10
Author: eliacereda@gmail.com
File Name: tests/trainer/test_config_validator.py
Class Name:
Method Name: test_test_loop_config


Project Name: williamFalcon/pytorch-lightning
Commit Name: 7464aca44e73f72998c411e999e085e4fdfd7568
Time: 2021-01-07
Author: gianluca@scarpellini.dev
File Name: tests/models/test_cpu.py
Class Name:
Method Name: test_cpu_slurm_save_load


Project Name: williamFalcon/pytorch-lightning
Commit Name: f4cc7451a94010a572480c43ad5f0af7ad52cd21
Time: 2021-03-10
Author: eliacereda@gmail.com
File Name: tests/trainer/test_config_validator.py
Class Name:
Method Name: test_val_loop_config