5dc3e70446c09312e2bd6dbe263b9af6f8405e62,scikits/cuda/special.py,,exp1,#Any#,145

Before Change


    

    if z_gpu.dtype == np.complex64:
        use_double = 0
    elif z_gpu.dtype == np.complex128:
        use_double = 1
    else:
        raise ValueError("unsupported type")

    
    # Get block/grid sizes; the number of threads per block is limited
    # to 256 because the kernel defined above uses too many registers
    # to be launched with more threads per block:
    dev = misc.get_current_device()
    max_threads_per_block = 256
    block_dim, grid_dim = \
               misc.select_block_grid_sizes(dev, z_gpu.shape, max_threads_per_block)

    # Set this to False when debugging to make sure the compiled kernel is
    # not cached:
    cache_dir = None
    expi_mod = \
             SourceModule(expi_template.substitute(use_double=use_double),
                          cache_dir=cache_dir,
                          options=["-I", install_headers])
    exp1_func = expi_mod.get_function("exp1_array")

    e_gpu = gpuarray.empty_like(z_gpu)
    exp1_func(z_gpu, e_gpu,
              np.uint32(z_gpu.size),
              block=block_dim,
              grid=grid_dim)
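
For context, the pre-change code recompiles the CUDA module with SourceModule on every call and launches the kernel with explicit block/grid dimensions. The snippet below is a minimal, self-contained sketch of that launch pattern in PyCUDA; the kernel is a trivial placeholder rather than the actual exp1 template, and the fixed 256-thread block stands in for scikit-cuda's misc.select_block_grid_sizes helper.

# Minimal sketch of the pre-change pattern: compile the module, then launch
# the kernel with explicit block/grid sizes. The kernel below is a trivial
# placeholder (y = 2*x), not the exp1 CUDA code from scikit-cuda.
import numpy as np
import pycuda.autoinit
import pycuda.gpuarray as gpuarray
from pycuda.compiler import SourceModule

mod = SourceModule("""
__global__ void scale(float *x, float *y, unsigned int n)
{
    unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x;
    if (idx < n)
        y[idx] = 2.0f*x[idx];
}
""")
scale = mod.get_function("scale")

x_gpu = gpuarray.to_gpu(np.arange(8, dtype=np.float32))
y_gpu = gpuarray.empty_like(x_gpu)

# 256 threads per block; enough blocks to cover the whole array:
block_dim = (256, 1, 1)
grid_dim = ((x_gpu.size + 255) // 256, 1)
scale(x_gpu, y_gpu, np.uint32(x_gpu.size), block=block_dim, grid=grid_dim)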

After Change


        exp1.cache[z_gpu.dtype] = func

    e_gpu = gpuarray.empty_like(z_gpu)
    func(z_gpu, e_gpu)

    return e_gpu
exp1.cache = {}
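
The post-change code instead caches the compiled per-dtype function on the exp1 function object itself, so the CUDA kernel is compiled only once per dtype and reused on later calls. Below is a hedged, self-contained sketch of that function-attribute caching pattern using PyCUDA's ElementwiseKernel with a toy kernel; it illustrates the pattern only and is not the actual exp1 implementation.

# Sketch of the function-attribute caching pattern: compile once per dtype,
# store the kernel in a dict hung off the function, and reuse it afterwards.
# The kernel here is a toy (y = 2*x), not the actual exp1 kernel.
import numpy as np
import pycuda.autoinit
import pycuda.gpuarray as gpuarray
from pycuda.elementwise import ElementwiseKernel

def double_it(x_gpu):
    try:
        func = double_it.cache[x_gpu.dtype]
    except KeyError:
        ctype = "float" if x_gpu.dtype == np.float32 else "double"
        func = ElementwiseKernel("{0} *x, {0} *y".format(ctype),
                                 "y[i] = 2*x[i]", "double_it")
        double_it.cache[x_gpu.dtype] = func
    y_gpu = gpuarray.empty_like(x_gpu)
    func(x_gpu, y_gpu)
    return y_gpu
double_it.cache = {}

# Usage: the first call per dtype compiles the kernel, later calls reuse it.
y_gpu = double_it(gpuarray.to_gpu(np.arange(8, dtype=np.float64)))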
In pattern: SUPERPATTERN

Frequency: 3

Non-data size: 10

Instances


Project Name: lebedov/scikit-cuda
Commit Name: 5dc3e70446c09312e2bd6dbe263b9af6f8405e62
Time: 2014-05-04
Author: lev@columbia.edu
File Name: scikits/cuda/special.py
Class Name:
Method Name: exp1


Project Name: lebedov/scikit-cuda
Commit Name: 5dc3e70446c09312e2bd6dbe263b9af6f8405e62
Time: 2014-05-04
Author: lev@columbia.edu
File Name: scikits/cuda/special.py
Class Name:
Method Name: sici


Project Name: lebedov/scikit-cuda
Commit Name: 5dc3e70446c09312e2bd6dbe263b9af6f8405e62
Time: 2014-05-04
Author: lev@columbia.edu
File Name: scikits/cuda/special.py
Class Name:
Method Name: expi