gptkbp:instanceOf | gptkb:graphics_card
gptkbp:architecture | gptkb:Turing
gptkbp:coreCount | 2560
gptkbp:formFactor | gptkb:PCIe
gptkbp:halfPrecisionPerformance | 65 TFLOPS
https://www.w3.org/2000/01/rdf-schema#label | Nvidia T4
gptkbp:int4Performance | 260 TOPS
gptkbp:int8Performance | 130 TOPS
gptkbp:interface | gptkb:PCI_Express_3.0_x16
gptkbp:lowProfile | true
gptkbp:manufacturer | gptkb:Nvidia
gptkbp:memoryBusWidth | 256-bit
gptkbp:memoryBandwidth | 320 GB/s
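The 320 GB/s memory bandwidth follows from the 256-bit GDDR6 interface at an effective per-pin data rate of about 10 Gbps (the per-pin rate is an assumption from commonly published T4 specifications, not stated in this entry):

\[ 256\,\text{bit} \div 8\,\tfrac{\text{bit}}{\text{byte}} \times 10\,\text{GT/s} = 32\,\text{B} \times 10\,\text{GT/s} = 320\,\text{GB/s} \]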
gptkbp:memoryType | gptkb:GDDR6
gptkbp:output | none
gptkbp:passivelyCooled | true
gptkbp:processNode | 12 nm
gptkbp:product | https://www.nvidia.com/en-us/data-center/tesla-t4/
gptkbp:RAM | 16 GB
gptkbp:releaseDate | 2018
gptkbp:singlePrecisionPerformance | 8.1 TFLOPS
gptkbp:supports | gptkb:machine_learning, gptkb:NVIDIA_TensorRT, gptkb:NVIDIA_GPU_Cloud, gptkb:NVIDIA_vGPU, gptkb:NVIDIA_NVDEC, gptkb:NVIDIA_CUDA, gptkb:NVIDIA_NVENC, deep learning, virtualization, AI inference, virtual desktop infrastructure, FP16, INT8, FP32, video transcoding
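As a quick illustration of the CUDA, FP16, and AI-inference support listed above, the following is a minimal sketch, assuming a host with a CUDA-enabled PyTorch build and the T4 visible as device 0; printed values are illustrative, not taken from this entry.

import torch

# Confirm a CUDA device is present (on a T4 host this should be the T4).
assert torch.cuda.is_available(), "no CUDA-capable device visible"

props = torch.cuda.get_device_properties(0)
print(props.name)                                   # e.g. "Tesla T4"
print(props.total_memory // 2**30, "GiB")           # total device memory (the entry above lists 16 GB)
print(f"compute capability {props.major}.{props.minor}")  # 7.5 on Turing

# FP16 matrix multiply; cuBLAS dispatches this to Tensor Core kernels on Turing.
a = torch.randn(4096, 4096, device="cuda", dtype=torch.float16)
b = torch.randn(4096, 4096, device="cuda", dtype=torch.float16)
c = a @ b
torch.cuda.synchronize()
print(c.dtype, c.shape)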
gptkbp:supportsECCMemory | true
gptkbp:targetMarket | gptkb:cloud_service
gptkbp:TDP | 70 W
gptkbp:Tensor_Cores | 320
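The throughput figures above are consistent with the listed CUDA core and Tensor Core counts as a back-of-the-envelope check, assuming a boost clock of about 1.59 GHz (commonly published for the T4, not stated in this entry):

\[ \text{FP32: } 2560\ \text{cores} \times 2\,\tfrac{\text{FLOP}}{\text{FMA}} \times 1.59\,\text{GHz} \approx 8.1\,\text{TFLOPS} \]
\[ \text{FP16: } 320\ \text{Tensor Cores} \times 64\,\tfrac{\text{FMA}}{\text{clock}} \times 2\,\tfrac{\text{FLOP}}{\text{FMA}} \times 1.59\,\text{GHz} \approx 65\,\text{TFLOPS} \]

INT8 runs at twice and INT4 at four times the FP16 tensor rate, matching the 130 TOPS and 260 TOPS entries.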
gptkbp:bfsParent | gptkb:Nvidia_DeepStream
gptkbp:bfsLayer | 6
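Given the RDF-style predicate/value layout of this entry (note the rdf-schema label predicate above), here is a minimal sketch of how a few of these facts could be held and queried as triples with Python's rdflib; the namespace URIs are hypothetical placeholders, not GPTKB's actual ones.

from rdflib import Graph, Literal, Namespace
from rdflib.namespace import RDFS

# Hypothetical placeholder namespaces for the gptkb:/gptkbp: prefixes above.
GPTKB = Namespace("http://example.org/gptkb/")
GPTKBP = Namespace("http://example.org/gptkbp/")

g = Graph()
t4 = GPTKB["Nvidia_T4"]
g.add((t4, RDFS.label, Literal("Nvidia T4")))
g.add((t4, GPTKBP.manufacturer, GPTKB["Nvidia"]))
g.add((t4, GPTKBP.architecture, GPTKB["Turing"]))
g.add((t4, GPTKBP.TDP, Literal("70 W")))

# SPARQL: list every predicate/value recorded for the node labelled "Nvidia T4".
q = """
SELECT ?p ?o WHERE {
  ?s <http://www.w3.org/2000/01/rdf-schema#label> "Nvidia T4" .
  ?s ?p ?o .
}
"""
for p, o in g.query(q):
    print(p, o)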