1f7492f81063cd039201b7b2e00a547547362d06,dipy/reconst/peaks.py,,_peaks_from_model_parallel,#Any#Any#Any#Any#Any#Any#Any#Any#Any#Any#Any#Any#Any#Any#Any#Any#,140

Before Change


    n = data.shape[0]
    nbr_chunks = nbr_processes ** 2
    chunk_size = int(np.ceil(n / nbr_chunks))
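    # NOTE: under Python 3, zip() returns a one-shot iterator; pool.map
    # below exhausts it, so the copy loop near the end silently never
    # runs (under Python 2, zip() returned a list and this worked).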
    indices = zip(np.arange(0, n, chunk_size),
                  np.arange(0, n, chunk_size) + chunk_size)

    with InTemporaryDirectory() as tmpdir:

        data_file_name = path.join(tmpdir, "data.npy")
        np.save(data_file_name, data)
        if mask is not None:
            mask = mask.flatten()
            mask_file_name = path.join(tmpdir, "mask.npy")
            np.save(mask_file_name, mask)
        else:
            mask_file_name = None

        pool = Pool(nbr_processes)

        pam_res = pool.map(_peaks_from_model_parallel_sub,
                           zip(repeat((data_file_name, mask_file_name)),
                               indices,
                               repeat(model),
                               repeat(sphere),
                               repeat(relative_peak_threshold),
                               repeat(min_separation_angle),
                               repeat(return_odf),
                               repeat(return_sh),
                               repeat(gfa_thr),
                               repeat(normalize_peaks),
                               repeat(sh_order),
                               repeat(sh_basis_type),
                               repeat(npeaks),
                               repeat(B),
                               repeat(invB)))
        pool.close()

        pam = PeaksAndMetrics()

        # use memmap to reduce the memory usage
        pam.gfa = np.memmap(path.join(tmpdir, "gfa.npy"),
                            dtype=pam_res[0].gfa.dtype,
                            mode="w+",
                            shape=(data.shape[0]))

        pam.peak_dirs = np.memmap(path.join(tmpdir, "peak_dirs.npy"),
                                  dtype=pam_res[0].peak_dirs.dtype,
                                  mode="w+",
                                  shape=(data.shape[0], npeaks, 3))
        pam.peak_values = np.memmap(path.join(tmpdir, "peak_values.npy"),
                                    dtype=pam_res[0].peak_values.dtype,
                                    mode="w+",
                                    shape=(data.shape[0], npeaks))
        pam.peak_indices = np.memmap(path.join(tmpdir, "peak_indices.npy"),
                                     dtype=pam_res[0].peak_indices.dtype,
                                     mode="w+",
                                     shape=(data.shape[0], npeaks))
        pam.qa = np.memmap(path.join(tmpdir, "qa.npy"),
                           dtype=pam_res[0].qa.dtype,
                           mode="w+",
                           shape=(data.shape[0], npeaks))
        if return_sh:
            nbr_shm_coeff = (sh_order + 2) * (sh_order + 1) / 2
            pam.shm_coeff = np.memmap(path.join(tmpdir, "shm.npy"),
                                      dtype=pam_res[0].shm_coeff.dtype,
                                      mode="w+",
                                      shape=(data.shape[0], nbr_shm_coeff))
            pam.B = pam_res[0].B
        else:
            pam.shm_coeff = None
            pam.invB = None
        if return_odf:
            pam.odf = np.memmap(path.join(tmpdir, "odf.npy"),
                                dtype=pam_res[0].odf.dtype,
                                mode="w+",
                                shape=(data.shape[0], len(sphere.vertices)))
        else:
            pam.odf = None

        # copy each subprocess's pam into the single pam (memmaps)
        for i, (start_pos, end_pos) in enumerate(indices):
            pam.gfa[start_pos: end_pos] = pam_res[i].gfa[:]
            pam.peak_dirs[start_pos: end_pos] = pam_res[i].peak_dirs[:]
            pam.peak_values[start_pos: end_pos] = pam_res[i].peak_values[:]
            pam.peak_indices[start_pos: end_pos] = pam_res[i].peak_indices[:]
            pam.qa[start_pos: end_pos] = pam_res[i].qa[:]
            if return_sh:
                pam.shm_coeff[start_pos: end_pos] = pam_res[i].shm_coeff[:]
            if return_odf:
                pam.odf[start_pos: end_pos] = pam_res[i].odf[:]

        pam_res = None

        # load the memmaps back into arrays and reshape the metrics
        shape[-1] = -1
        pam.gfa = np.reshape(np.array(pam.gfa), shape[:-1])
        pam.peak_dirs = np.reshape(np.array(pam.peak_dirs), shape[:] + [3])
        pam.peak_values = np.reshape(np.array(pam.peak_values), shape[:])
        pam.peak_indices = np.reshape(np.array(pam.peak_indices), shape[:])
        pam.qa = np.reshape(np.array(pam.qa), shape[:])
        if return_sh:
            pam.shm_coeff = np.reshape(np.array(pam.shm_coeff), shape[:])
        if return_odf:
            pam.odf = np.reshape(np.array(pam.odf), shape[:])
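
The fix below wraps the zip call in list(...). Under Python 3, zip returns a single-use iterator; pool.map consumes it, so the loop that copies each subprocess result back into the shared memmaps iterates over an exhausted iterator and the output stays zero-filled, with no error raised. Materializing the indices as a list lets them be traversed twice. (The print(indices) call in the new version looks like leftover debugging output.)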

After Change


    n = data.shape[0]
    nbr_chunks = nbr_processes ** 2
    chunk_size = int(np.ceil(n / nbr_chunks))
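    # Materialize the chunk boundaries up front: a bare zip() would be
    # consumed by pool.map and leave nothing for the copy loop below.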
    indices = list(zip(np.arange(0, n, chunk_size),
                       np.arange(0, n, chunk_size) + chunk_size))

    with InTemporaryDirectory() as tmpdir:

        data_file_name = path.join(tmpdir, "data.npy")
        np.save(data_file_name, data)
        if mask is not None:
            mask = mask.flatten()
            mask_file_name = path.join(tmpdir, "mask.npy")
            np.save(mask_file_name, mask)
        else:
            mask_file_name = None

        pool = Pool(nbr_processes)

        pam_res = pool.map(_peaks_from_model_parallel_sub,
                           zip(repeat((data_file_name, mask_file_name)),
                               indices,
                               repeat(model),
                               repeat(sphere),
                               repeat(relative_peak_threshold),
                               repeat(min_separation_angle),
                               repeat(return_odf),
                               repeat(return_sh),
                               repeat(gfa_thr),
                               repeat(normalize_peaks),
                               repeat(sh_order),
                               repeat(sh_basis_type),
                               repeat(npeaks),
                               repeat(B),
                               repeat(invB)))
        pool.close()

        pam = PeaksAndMetrics()
        # use memmap to reduce the memory usage
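        # Each metric is assembled in a disk-backed array inside tmpdir,
        # so the chunks never need a second full copy in RAM; np.array()
        # at the end reads the result back into memory for reshaping.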
        pam.gfa = np.memmap(path.join(tmpdir, "gfa.npy"),
                            dtype=pam_res[0].gfa.dtype,
                            mode="w+",
                            shape=(data.shape[0]))

        pam.peak_dirs = np.memmap(path.join(tmpdir, "peak_dirs.npy"),
                                  dtype=pam_res[0].peak_dirs.dtype,
                                  mode="w+",
                                  shape=(data.shape[0], npeaks, 3))
        pam.peak_values = np.memmap(path.join(tmpdir, "peak_values.npy"),
                                    dtype=pam_res[0].peak_values.dtype,
                                    mode="w+",
                                    shape=(data.shape[0], npeaks))
        pam.peak_indices = np.memmap(path.join(tmpdir, "peak_indices.npy"),
                                     dtype=pam_res[0].peak_indices.dtype,
                                     mode="w+",
                                     shape=(data.shape[0], npeaks))
        pam.qa = np.memmap(path.join(tmpdir, "qa.npy"),
                           dtype=pam_res[0].qa.dtype,
                           mode="w+",
                           shape=(data.shape[0], npeaks))
        if return_sh:
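            # (sh_order+1)(sh_order+2)/2 symmetric SH coefficients; note
            # that "/" yields a float under Python 3, so the memmap shape
            # below would need an explicit int (e.g. "//").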
            nbr_shm_coeff = (sh_order + 2) * (sh_order + 1) / 2
            pam.shm_coeff = np.memmap(path.join(tmpdir, "shm.npy"),
                                      dtype=pam_res[0].shm_coeff.dtype,
                                      mode="w+",
                                      shape=(data.shape[0], nbr_shm_coeff))
            pam.B = pam_res[0].B
        else:
            pam.shm_coeff = None
            pam.invB = None
        if return_odf:
            pam.odf = np.memmap(path.join(tmpdir, "odf.npy"),
                                dtype=pam_res[0].odf.dtype,
                                mode="w+",
                                shape=(data.shape[0], len(sphere.vertices)))
        else:
            pam.odf = None

        print(indices)
        # copy each subprocess's pam into the single pam (memmaps)
        for i, (start_pos, end_pos) in enumerate(indices):
            pam.gfa[start_pos: end_pos] = pam_res[i].gfa[:]
            pam.peak_dirs[start_pos: end_pos] = pam_res[i].peak_dirs[:]
            pam.peak_values[start_pos: end_pos] = pam_res[i].peak_values[:]
            pam.peak_indices[start_pos: end_pos] = pam_res[i].peak_indices[:]
            pam.qa[start_pos: end_pos] = pam_res[i].qa[:]
            if return_sh:
                pam.shm_coeff[start_pos: end_pos] = pam_res[i].shm_coeff[:]
            if return_odf:
                pam.odf[start_pos: end_pos] = pam_res[i].odf[:]

        pam_res = None

        # load the memmaps back into arrays and reshape the metrics
        shape[-1] = -1
        pam.gfa = np.array(pam.gfa)
        pam.gfa = np.reshape(pam.gfa, shape[:-1])
        pam.peak_dirs = np.reshape(np.array(pam.peak_dirs), shape[:] + [3])
        pam.peak_values = np.reshape(np.array(pam.peak_values), shape[:])
        pam.peak_indices = np.reshape(np.array(pam.peak_indices), shape[:])
        pam.qa = np.reshape(np.array(pam.qa), shape[:])
        if return_sh:
            pam.shm_coeff = np.reshape(np.array(pam.shm_coeff), shape[:])
        if return_odf:
            pam.odf = np.reshape(np.array(pam.odf), shape[:])
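
The change mined here is the classic Python 3 migration fix: a zip() result that must be traversed more than once gets wrapped in list(). The sketch below reproduces the failure mode with a plain multiprocessing.Pool; the names (process_chunk, n, chunk_size) are illustrative and not part of dipy.

    import numpy as np
    from multiprocessing import Pool


    def process_chunk(bounds):
        # stand-in for the per-chunk worker (_peaks_from_model_parallel_sub)
        start, end = bounds
        return np.arange(start, end, dtype=float)


    if __name__ == "__main__":
        n, chunk_size = 8, 2
        starts = np.arange(0, n, chunk_size)
        indices = zip(starts, starts + chunk_size)        # one-shot iterator
        # indices = list(zip(starts, starts + chunk_size))  # the fix

        with Pool(2) as pool:
            chunks = pool.map(process_chunk, indices)     # exhausts the iterator

        out = np.zeros(n)
        # with a bare zip this second traversal never runs, so out
        # silently stays zero-filled; with list(zip(...)) it fills correctly
        for i, (start, end) in enumerate(indices):
            out[start:end] = chunks[i]
        print(out)

The same hazard applies to map() and filter() results, which are also single-use iterators under Python 3.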
In pattern: SUPERPATTERN

Frequency: 4

Non-data size: 7

Instances


Project Name: nipy/dipy
Commit Name: 1f7492f81063cd039201b7b2e00a547547362d06
Time: 2013-11-18
Author: girard.gabriel@gmail.com
File Name: dipy/reconst/peaks.py
Class Name:
Method Name: _peaks_from_model_parallel


Project Name: tflearn/tflearn
Commit Name: e322665f17f0a35e2d5fe6163fc558d5b9deba7e
Time: 2016-04-04
Author: brettnaul@gmail.com
File Name: tflearn/helpers/trainer.py
Class Name: TrainOp
Method Name: initialize_training_ops


Project Name: HazyResearch/pdftotree
Commit Name: ff0fa66f8997121dccb421f82b0eadc3e21688e2
Time: 2018-04-04
Author: lwhsiao@stanford.edu
File Name: pdftotree/utils/pdf/grid.py
Class Name: Grid
Method Name: __init__


Project Name: apache/incubator-mxnet
Commit Name: 566905f2421599d83c446ecb0b25176238f7a909
Time: 2020-01-19
Author: 1483586698@qq.com
File Name: example/speech_recognition/stt_io_bucketingiter.py
Class Name: BucketSTTIter
Method Name: __init__