Révision 296 ETSN/MySteps_6.py

MySteps_6.py (revision 296)
@@ -289,13 +289,14 @@
     Device=0
     Calls=1
     Threads=1
-
+    Serial=True
+    
     import getopt
 
-    HowToUse='%s -g <CUDA/OpenCL> -s <SizeOfVector> -d <DeviceId> -c <SillyCalls> -t <Threads>'
+    HowToUse='%s -n -g <CUDA/OpenCL> -s <SizeOfVector> -d <DeviceId> -c <SillyCalls> -t <Threads>'
     
     try:
-        opts, args = getopt.getopt(sys.argv[1:],"hg:s:d:c:t:",["gpustyle=","size=","device=","calls=","threads="])
+        opts, args = getopt.getopt(sys.argv[1:],"hng:s:d:c:t:",["gpustyle=","size=","device=","calls=","threads="])
     except getopt.GetoptError:
         print(HowToUse % sys.argv[0])
         sys.exit(2)
@@ -347,12 +348,15 @@
             GpuStyle = arg
         elif opt in ("-s", "--size"):
             SIZE = int(arg)
+        elif opt in ("-n"):
+            Serial = False
 
     print("Device Selection : %i" % Device)
     print("GpuStyle used : %s" % GpuStyle)
     print("Size of complex vector : %i" % SIZE)
     print("Number of silly calls : %i" % Calls)
     print("Number of Threads : %i" % Threads)
+    print("Serial compute : %i" % Serial)
 
     if GpuStyle=='CUDA':
         try:
@@ -391,11 +395,12 @@
     b_np = np.random.rand(SIZE).astype(np.float32)
 
     # Native Implementation
-    TimeIn=time.time()
-    res_np=NativeSillyAddition(a_np,b_np,Calls)
-    NativeElapsed=time.time()-TimeIn
-    NativeRate=int(SIZE/NativeElapsed)
-    print("NativeRate: %i" % NativeRate)
+    if Serial:
+        TimeIn=time.time()
+        res_np=NativeSillyAddition(a_np,b_np,Calls)
+        NativeElapsed=time.time()-TimeIn
+        NativeRate=int(SIZE/NativeElapsed)
+        print("NativeRate: %i" % NativeRate)
 
     # OpenCL Implementation
     if GpuStyle=='OpenCL' or GpuStyle=='all':
@@ -406,14 +411,15 @@
         OpenCLRate=int(SIZE/OpenCLElapsed)
         print("OpenCLRate: %i" % OpenCLRate)
         # Check on OpenCL with Numpy:
-        print(res_cl - res_np)
-        print(np.linalg.norm(res_cl - res_np))
-        try:
-            assert np.allclose(res_np, res_cl)
-        except:
-            print("Results between Native & OpenCL seem to be too different!")
+        if Serial:
+            print(res_cl - res_np)
+            print(np.linalg.norm(res_cl - res_np))
+            try:
+                assert np.allclose(res_np, res_cl)
+            except:
+                print("Results between Native & OpenCL seem to be too different!")
 
-        print("OpenCLvsNative ratio: %f" % (OpenCLRate/NativeRate))
+            print("OpenCLvsNative ratio: %f" % (OpenCLRate/NativeRate))
             
     # CUDA Implementation
     if GpuStyle=='CUDA' or GpuStyle=='all':
@@ -423,14 +429,15 @@
         CUDARate=int(SIZE/CUDAElapsed)
         print("CUDARate: %i" % CUDARate)
         # Check on CUDA with Numpy:
-        print(res_cuda - res_np)
-        print(np.linalg.norm(res_cuda - res_np))
-        try:
-            assert np.allclose(res_np, res_cuda)
-        except:
-            print("Results between Native & CUDA seem to be too different!")
+        if Serial:
+            print(res_cuda - res_np)
+            print(np.linalg.norm(res_cuda - res_np))
+            try:
+                assert np.allclose(res_np, res_cuda)
+            except:
+                print("Results between Native & CUDA seem to be too different!")
     
-        print("CUDAvsNative ratio: %f" % (CUDARate/NativeRate))
+            print("CUDAvsNative ratio: %f" % (CUDARate/NativeRate))
     
        

  

Formats disponibles : Unified diff