gptkbp:instance_of
|
gptkb:Graphics_Processing_Unit
|
gptkbp:ai
|
624 TOPS (INT8 Tensor Core)
|
gptkbp:architecture
|
gptkb:Ampere
|
gptkbp:compatibility
|
gptkb:Tensor_RT
gptkb:cu_DNN
gptkb:NVIDIA_HPC_SDK
CUDA 11
|
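The CUDA 11 requirement above can be verified at runtime; a minimal sketch using the standard cudaRuntimeGetVersion call (the version threshold is the only figure taken from this entry):

```cuda
// Minimal sketch: confirm the installed CUDA runtime is at least 11.x,
// the toolkit generation this entry lists for A100 (Ampere) compatibility.
#include <cuda_runtime.h>
#include <cstdio>

int main() {
    int runtime_version = 0;
    cudaRuntimeGetVersion(&runtime_version);   // encoded as 1000*major + 10*minor
    int major = runtime_version / 1000;
    int minor = (runtime_version % 1000) / 10;
    std::printf("CUDA runtime %d.%d detected\n", major, minor);
    if (major < 11) {
        std::printf("CUDA 11 or newer is required for compute capability 8.0\n");
        return 1;
    }
    return 0;
}
```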
gptkbp:connects
|
gptkb:NVLink
|
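A minimal sketch of how the NVLink connectivity listed above is typically used from CUDA, assuming a node with at least two A100 SXM4 modules (the device ordinals 0 and 1 are illustrative):

```cuda
// Minimal sketch: enable peer-to-peer access between two GPUs connected over
// NVLink/NVSwitch using the standard CUDA peer-access API.
#include <cuda_runtime.h>
#include <cstdio>

int main() {
    int can_access = 0;
    cudaDeviceCanAccessPeer(&can_access, /*device=*/0, /*peerDevice=*/1);
    if (can_access) {
        cudaSetDevice(0);
        cudaDeviceEnablePeerAccess(1, 0);   // flags must be 0
        std::printf("GPU 0 can now read/write GPU 1 memory directly\n");
    } else {
        std::printf("No peer access between GPU 0 and GPU 1\n");
    }
    return 0;
}
```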
gptkbp:deep_learning_performance
|
312 TFLOPS (FP16/BF16 Tensor Core)
|
gptkbp:form_factor
|
SXM4
|
gptkbp:has_units
|
108
|
https://www.w3.org/2000/01/rdf-schema#label
|
A100 SXM4
|
gptkbp:manufacturer
|
gptkb:NVIDIA
|
gptkbp:max_block_size
|
1024
|
gptkbp:max_compute_capability
|
8.0
|
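Several figures in this entry (compute capability 8.0, 108 SMs, 1024 threads per block, 2048 threads per SM, 49152 bytes of shared memory per block, 40 GB of memory) can be read back from the driver; a minimal sketch using cudaGetDeviceProperties:

```cuda
// Minimal sketch: query device 0 and compare against the figures in this entry.
#include <cuda_runtime.h>
#include <cstdio>

int main() {
    cudaDeviceProp prop;
    cudaGetDeviceProperties(&prop, 0);
    std::printf("name: %s\n", prop.name);
    std::printf("compute capability: %d.%d\n", prop.major, prop.minor);          // 8.0
    std::printf("multiprocessors (SMs): %d\n", prop.multiProcessorCount);        // 108
    std::printf("max threads per block: %d\n", prop.maxThreadsPerBlock);         // 1024
    std::printf("max threads per SM: %d\n", prop.maxThreadsPerMultiProcessor);   // 2048
    std::printf("shared memory per block: %zu bytes\n", prop.sharedMemPerBlock); // 49152
    std::printf("global memory: %.1f GB\n", prop.totalGlobalMem / 1e9);          // ~40
    return 0;
}
```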
gptkbp:max_concurrent_kernel
|
7
|
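Concurrent kernel execution is exercised by launching into separate non-default streams; a minimal sketch (the kernel body and the choice of four streams are illustrative, not taken from this entry):

```cuda
// Minimal sketch: kernels launched into distinct non-default streams may run
// concurrently on devices that support concurrent kernel execution.
#include <cuda_runtime.h>

__global__ void busy_kernel(float *out, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) out[i] = sinf(out[i]);   // arbitrary work to keep the SMs busy
}

int main() {
    const int kStreams = 4, n = 1 << 20;
    float *buf[kStreams];
    cudaStream_t streams[kStreams];
    for (int s = 0; s < kStreams; ++s) {
        cudaMalloc(&buf[s], n * sizeof(float));
        cudaStreamCreate(&streams[s]);
        busy_kernel<<<(n + 255) / 256, 256, 0, streams[s]>>>(buf[s], n);
    }
    cudaDeviceSynchronize();
    for (int s = 0; s < kStreams; ++s) {
        cudaStreamDestroy(streams[s]);
        cudaFree(buf[s]);
    }
    return 0;
}
```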
gptkbp:max_grid_size
|
2147483647 × 65535 × 65535 (x, y, z)
|
gptkbp:max_registers_per_block
|
65536
|
gptkbp:max_shared_memory_per_block
|
49152 bytes (default; up to 163 KB via opt-in)
|
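The 49152-byte figure is the default per-block limit; on compute capability 8.0 a kernel can opt in to more dynamic shared memory with cudaFuncSetAttribute. A minimal sketch (the 64 KB request and the kernel itself are illustrative, not taken from this entry):

```cuda
// Minimal sketch: request more dynamic shared memory than the 48 KB default.
#include <cuda_runtime.h>

__global__ void staged_copy(const float *in, float *out, int n) {
    extern __shared__ float tile[];                  // dynamic shared memory
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) {
        tile[threadIdx.x] = in[i];
        __syncthreads();
        out[i] = tile[threadIdx.x];
    }
}

int main() {
    size_t dyn_smem = 64 * 1024;                     // above the 48 KB default
    cudaFuncSetAttribute(staged_copy,
                         cudaFuncAttributeMaxDynamicSharedMemorySize,
                         (int)dyn_smem);
    // ... allocate in/out, then launch with the requested shared-memory size:
    // staged_copy<<<grid, 256, dyn_smem, 0>>>(in, out, n);
    return 0;
}
```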
gptkbp:max_surface_memory
|
16384 bytes
|
gptkbp:max_texture_memory
|
16384 bytes
|
gptkbp:max_threads_per_block
|
1024
|
gptkbp:max_threads_per_multiprocessor
|
2048
|
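A minimal launch-configuration sketch against the block and SM limits above (the kernel and problem size are illustrative): a 1024-thread block is the largest allowed, and the 2048-thread SM limit means at most two such blocks are resident per SM.

```cuda
// Minimal sketch: size a launch using the maximum 1024-thread block.
#include <cuda_runtime.h>

__global__ void scale(float *x, float a, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) x[i] *= a;
}

int main() {
    const int n = 1 << 24;
    const int block = 1024;                       // max_threads_per_block
    const int grid  = (n + block - 1) / block;    // well under the grid x-dimension cap
    float *x;
    cudaMalloc(&x, n * sizeof(float));
    scale<<<grid, block>>>(x, 2.0f, n);
    cudaDeviceSynchronize();
    cudaFree(x);
    return 0;
}
```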
gptkbp:memory_type
|
gptkb:HBM2
|
gptkbp:number_of_cores
|
6912 CUDA cores
432 Tensor Cores
|
gptkbp:nvswitch_support
|
gptkb:Yes
|
gptkbp:pciexpress_version
|
4.0
|
gptkbp:performance
|
19.5 TFLOPS (FP32)
9.7 TFLOPS (FP64)
|
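The two figures above are the FP32 and FP64 peaks; they follow from the core count listed under number_of_cores, assuming the published ~1.41 GHz boost clock and 3456 FP64 units (neither of which is part of this entry):

```cuda
// Minimal sketch of the peak-throughput arithmetic:
// peak = units x 2 FLOPs per FMA x clock.
#include <cstdio>

int main() {
    const double boost_clock_hz = 1.41e9;   // assumed boost clock, not in this entry
    const double fp32_cores = 6912;         // from number_of_cores above
    const double fp64_units = fp32_cores / 2;   // assumed 32 FP64 units per SM x 108 SMs
    double fp32_tflops = fp32_cores * 2 * boost_clock_hz / 1e12;
    double fp64_tflops = fp64_units * 2 * boost_clock_hz / 1e12;
    std::printf("peak FP32: %.1f TFLOPS\n", fp32_tflops);   // ~19.5
    std::printf("peak FP64: %.1f TFLOPS\n", fp64_tflops);   // ~9.7
    return 0;
}
```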
gptkbp:power_connector
|
None (power delivered through the SXM4 socket)
|
gptkbp:ram
|
40 GB
1555 GB/s (memory bandwidth)
|
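A rough way to check the 1555 GB/s figure is a timed device-to-device copy; a minimal sketch using CUDA events (the 1 GiB buffer size is illustrative):

```cuda
// Minimal sketch: effective HBM2 bandwidth from a timed device-to-device copy
// (a copy reads and writes each byte, hence the factor of 2).
#include <cuda_runtime.h>
#include <cstdio>

int main() {
    const size_t bytes = 1ull << 30;          // 1 GiB per buffer
    float *src, *dst;
    cudaMalloc(&src, bytes);
    cudaMalloc(&dst, bytes);

    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    cudaMemcpy(dst, src, bytes, cudaMemcpyDeviceToDevice);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);

    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);
    double gbps = 2.0 * bytes / (ms * 1e-3) / 1e9;
    std::printf("effective D2D bandwidth: %.0f GB/s\n", gbps);

    cudaFree(src);
    cudaFree(dst);
    return 0;
}
```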
gptkbp:release_date
|
2020
|
gptkbp:slisupport
|
No (multi-GPU via NVLink/NVSwitch, not SLI)
|
gptkbp:target_market
|
gptkb:cloud_computing
Scientific Research
Data Centers
Enterprise AI
|
gptkbp:tdp
|
400 W
|
gptkbp:use_case
|
gptkb:Deep_Learning
gptkb:Data_Analytics
High-Performance Computing
AI Training
|
gptkbp:bfsParent
|
gptkb:NVIDIA_A100_Tensor_Core_GPU
|
gptkbp:bfsLayer
|
5
|