// Overte C++ Documentation — VKBackend.h (documentation-site banner kept as a comment)
1 //
2 // Created by Bradley Austin Davis on 2016/08/07
3 // Adapted for Vulkan in 2022-2025 by dr Karol Suprynowicz.
4 // Copyright 2013-2018 High Fidelity, Inc.
5 // Copyright 2023-2025 Overte e.V.
6 //
7 // Distributed under the Apache License, Version 2.0.
8 // See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
9 // SPDX-License-Identifier: Apache-2.0
10 //
11 
12 #ifndef hifi_gpu_vk_VKBackend_h
13 #define hifi_gpu_vk_VKBackend_h
14 
15 #include <assert.h>
16 #include <functional>
17 #include <memory>
18 #include <bitset>
19 #include <queue>
20 #include <utility>
21 #include <list>
22 #include <array>
23 
24 #include <gpu/Forward.h>
25 #include <gpu/Context.h>
26 
27 #include <vk/Config.h>
28 #include <vk/Context.h>
29 #include <vk/VulkanDebug.h>
30 #include <vulkan/vulkan_core.h>
31 #include <glad/glad.h>
32 
33 #include "VKForward.h"
34 #include "../../../../vk/src/vk/Context.h"
35 
36 //#define GPU_STEREO_TECHNIQUE_DOUBLED_SMARTER
37 #define GPU_STEREO_TECHNIQUE_INSTANCED
38 
39 // Let these be configured by the one define picked above
40 #ifdef GPU_STEREO_TECHNIQUE_DOUBLED_SIMPLE
41 #define GPU_STEREO_DRAWCALL_DOUBLED
42 #endif
43 
44 #ifdef GPU_STEREO_TECHNIQUE_DOUBLED_SMARTER
45 #define GPU_STEREO_DRAWCALL_DOUBLED
46 #define GPU_STEREO_CAMERA_BUFFER
47 #endif
48 
49 #ifdef GPU_STEREO_TECHNIQUE_INSTANCED
50 #define GPU_STEREO_DRAWCALL_INSTANCED
51 #define GPU_STEREO_CAMERA_BUFFER
52 #endif
53 
54 namespace gpu { namespace vk {
55 
56 class VKAttachmentTexture;
57 
// Maximum number of uniform-buffer slots a batch may bind directly.
// There's also the camera buffer at slot 15.
// Declared constexpr (instead of static const) so these are guaranteed
// compile-time constants, usable in constant expressions such as the
// std::array sizes below.
constexpr int MAX_NUM_UNIFORM_BUFFERS = 14;

// Minimum device capabilities this backend requires.
constexpr int32_t MIN_REQUIRED_TEXTURE_IMAGE_UNITS = 16;
constexpr int32_t MIN_REQUIRED_COMBINED_UNIFORM_BLOCKS = 70;
constexpr int32_t MIN_REQUIRED_COMBINED_TEXTURE_IMAGE_UNITS = 48;
constexpr int32_t MIN_REQUIRED_UNIFORM_BUFFER_BINDINGS = 36;
constexpr int32_t MIN_REQUIRED_UNIFORM_LOCATIONS = 1024;

// Number of resource (storage buffer / texture) binding slots.
constexpr int MAX_NUM_RESOURCE_BUFFERS = 16;
constexpr int MAX_NUM_RESOURCE_TEXTURES = 16;
68 
// Backend-side (GPUObject) representation of a vertex input format
// (gpu::Stream::Format). Holds a string 'key' describing the attribute
// layout — presumably used to match pipelines against input formats;
// confirm at call sites.
class VKInputFormat : public GPUObject {
public:
    // Returns the VKInputFormat associated with 'inputFormat'
    // (by GPUObject convention, created on first use — confirm).
    static VKInputFormat* sync(const Stream::Format& inputFormat);

    VKInputFormat();
    ~VKInputFormat();

    // String identifying this input format's attribute layout.
    std::string key;
};
78 
// Vulkan implementation of the gpu::Backend interface: translates recorded
// gpu::Batch commands into Vulkan command buffers and owns the backend-side
// (VK*) mirrors of gpu resources (buffers, textures, framebuffers, queries).
class VKBackend : public Backend, public std::enable_shared_from_this<VKBackend> {
    // Context Backend static interface required
    friend class gpu::Context;

    static void init();
    static BackendPointer createBackend();

protected:
    // Sentinel marking "no saved view/projection slot currently selected".
    static const uint INVALID_SAVED_CAMERA_SLOT = (uint)-1;

    class FrameData;
    // Allows for correction of the camera pose to account for changes
    // between the time when a frame was recorded and the time(s) when it is
    // executed
    // Prev is the previous correction used at previous frame
    struct PresentFrame {
        mat4 correction;
        mat4 correctionInverse;
        mat4 unflippedCorrection;
        mat4 flippedCorrection;
        bool mirrorViewCorrection { false };
    };

    struct UniformStageState;

    // Tracks view/projection/camera state while a batch is translated.
    struct TransformStageState {
#ifdef GPU_STEREO_CAMERA_BUFFER
        // When stereo uses a single camera buffer, each buffer element
        // holds the cameras for both eyes.
        struct Cameras {
            TransformCamera _cams[2];

            Cameras(){};
            Cameras(const TransformCamera& cam) { _cams[0] = cam; };
            Cameras(const TransformCamera& camL, const TransformCamera& camR) {
                _cams[0] = camL;
                _cams[1] = camR;
            };
        };

        using CameraBufferElement = Cameras;
#else
        using CameraBufferElement = TransformCamera;
#endif
        using TransformCameras = std::vector<CameraBufferElement>;

        // View/projection pair including previous-frame values (the
        // _previous* members are deliberately excluded from copies).
        struct ViewProjectionState {
            Transform _view;
            Transform _correctedView;
            Transform _previousCorrectedView;
            Mat4 _projection;
            Mat4 _previousProjection;
            bool _viewIsCamera;

            // Copies everything except the _previous* members, which keep
            // their last-frame values.
            void copyExceptPrevious(const ViewProjectionState& other) {
                _view = other._view;
                _correctedView = other._correctedView;
                _projection = other._projection;
                _viewIsCamera = other._viewIsCamera;
            }
        };

        // A saved view/projection state plus the camera-buffer offset it was
        // written at (INVALID_OFFSET when not yet written).
        struct SaveTransform {
            ViewProjectionState _state;
            size_t _cameraOffset { INVALID_OFFSET };
        };

        TransformCamera _camera;
        TransformCameras _cameras;
        std::array<SaveTransform, gpu::Batch::MAX_TRANSFORM_SAVE_SLOT_COUNT> _savedTransforms;

        mutable std::map<std::string, VkDeviceSize> _drawCallInfoOffsets;

        //uint32_t _objectBufferTexture{ 0 };
        size_t _cameraUboSize{ 0 };
        ViewProjectionState _viewProjectionState;
        uint _currentSavedTransformSlot { INVALID_SAVED_CAMERA_SLOT };
        bool _skybox{ false };
        Transform _view;
        PresentFrame _presentFrame;
        bool _viewCorrectionEnabled{ true };
        // This is set by frame player to override camera correction setting
        bool _viewCorrectionEnabledForFramePlayer{ false };

        // Sequence of sub-pixel projection jitter offsets, cycled via
        // _currentSampleIndex and scaled by _scale.
        struct Jitter {
            std::vector<Vec2> _offsetSequence;
            Vec2 _offset { 0.0f };
            float _scale { 0.f };
            unsigned int _currentSampleIndex { 0 };
            bool _isEnabled { false };
        };

        Jitter _projectionJitter;
        Vec4i _viewport{ 0, 0, 1, 1 };
        Vec2 _depthRange{ 0.0f, 1.0f };
        bool _invalidView{ false };
        bool _invalidProj{ false };
        bool _invalidViewport{ false };

        bool _enabledDrawcallInfoBuffer{ false };

        // NOTE(review): each Pair presumably maps a command index to a camera
        // buffer offset — confirm against preUpdate()/update().
        using Pair = std::pair<size_t, size_t>;
        using List = std::list<Pair>;
        List _cameraOffsets;
        mutable List::const_iterator _camerasItr;
        mutable size_t _currentCameraOffset{ INVALID_OFFSET };

        void pushCameraBufferElement(const StereoState& stereo, const StereoState& prevStereo, TransformCameras& cameras) const;
        void preUpdate(size_t commandIndex, const StereoState& stereo, const StereoState& prevStereo, Vec2u framebufferSize);
        void update(size_t commandIndex, const StereoState& stereo, VKBackend::UniformStageState &uniform, FrameData &currentFrame) const;
        void bindCurrentCamera(int stereoSide, VKBackend::UniformStageState &uniform, FrameData &currentFrame) const;
    } _transform;

    void preUpdateTransform();
    void transferTransformState(const Batch& batch);

protected:
    // Vertex-input state for the current batch: attribute format plus
    // vertex/index/indirect buffer bindings.
    struct InputStageState {
        bool _invalidFormat { true };
        bool _lastUpdateStereoState { false };
        FormatReference _format { GPU_REFERENCE_INIT_VALUE };
        std::string _formatKey;

        typedef std::bitset<MAX_NUM_ATTRIBUTES> ActivationCache;
        ActivationCache _attributeActivation { 0 };

        typedef std::bitset<MAX_NUM_INPUT_BUFFERS> BuffersState;

        // One dirty bit per input-buffer slot.
        BuffersState _invalidBuffers { 0 };
        BuffersState _attribBindingBuffers { 0 };

        std::array<BufferReference, MAX_NUM_INPUT_BUFFERS> _buffers;
        std::array<Offset, MAX_NUM_INPUT_BUFFERS> _bufferOffsets;
        std::array<Offset, MAX_NUM_INPUT_BUFFERS> _bufferStrides;
        std::array<VkBuffer, MAX_NUM_INPUT_BUFFERS> _bufferVBOs;

        BufferReference _indexBuffer;
        Offset _indexBufferOffset { 0 };
        Type _indexBufferType { UINT32 };

        BufferReference _indirectBuffer;
        Offset _indirectBufferOffset { 0 };
        Offset _indirectBufferStride { 0 };

        uint32_t _defaultVAO { 0 };

        void reset();
    } _input;

    void resetInputStage();

    // Uniform-buffer bindings for the current batch.
    struct UniformStageState {
        struct BufferState {
            BufferReference buffer{};
            uint32_t offset{ 0 }; // VKTODO: Vulkan uses 64-bit offset and size
            uint32_t size{ 0 }; // VKTODO

            // Copy-assignment disallowed; slots are cleared via reset().
            BufferState& operator=(const BufferState& other) = delete;
            void reset() {
                gpu::reset(buffer);
                offset = 0;
                size = 0;
            }
        };

        // MAX_NUM_UNIFORM_BUFFERS-1 is the max uniform index BATCHES are allowed to set, but
        // MIN_REQUIRED_UNIFORM_BUFFER_BINDINGS is used here because the backend sets some
        // internal UBOs for things like camera correction
        std::array<BufferState, MIN_REQUIRED_UNIFORM_BUFFER_BINDINGS> _buffers;
    } _uniform;

    // Writes the currently bound uniform buffers into the given descriptor set.
    void updateVkDescriptorWriteSetsUniform(VkDescriptorSet target);
    void releaseUniformBuffer(uint32_t slot);
    void resetUniformStage();

    // Resource (texture and storage-buffer) bindings for the current batch.
    struct ResourceStageState {
        struct TextureState {
            TextureReference texture{};
            // Copy-assignment disallowed; slots are cleared via reset().
            TextureState& operator=(const TextureState& other) = delete;
            void reset() {
                gpu::reset(texture);
            }
        };
        struct BufferState {
            BufferReference buffer{};
            // Copy-assignment disallowed; slots are cleared via reset().
            BufferState& operator=(const BufferState& other) = delete;
            void reset() {
                gpu::reset(buffer);
            }
        };
        std::array<BufferState, MAX_NUM_RESOURCE_BUFFERS> _buffers{};
        std::array<TextureState, MAX_NUM_RESOURCE_TEXTURES> _textures{};
    } _resource;

    // Writes the currently bound textures into the given descriptor set.
    void updateVkDescriptorWriteSetsTexture(VkDescriptorSet target);
    void bindResourceTexture(uint32_t slot, const TexturePointer& texture);
    void releaseResourceTexture(uint32_t slot);
    void resetTextureStage();

    // Writes the currently bound storage buffers into the given descriptor set.
    void updateVkDescriptorWriteSetsStorage(VkDescriptorSet target);
    void releaseResourceBuffer(uint32_t slot);
    void resetResourceStage();

    // VKTODO
    /*struct OutputStageState {
        FramebufferReference _framebuffer{};
        int _drawFBO{ 0 };
    } _output;*/

    // VKTODO
    struct QueryStageState {
        uint32_t _rangeQueryDepth{ 0 };
    } _queryStage;

    void resetQueryStage();

    // State of the render pass currently being recorded.
    VkRenderPass _currentVkRenderPass{ VK_NULL_HANDLE };
    gpu::FramebufferReference _currentFramebuffer{ nullptr }; // Framebuffer used in currently happening render pass
    VkFramebuffer _currentVkFramebuffer{ VK_NULL_HANDLE }; // Framebuffer used in currently happening render pass
    bool _hasFramebufferChanged {false}; // Set to true when batch calls setFramebuffer command. Used to end render pass and update input image layouts.
    // Checks if renderpass change is needed and changes it if required
    void updateRenderPass();
    void updateAttachmentLayoutsAfterRenderPass();
    void resetRenderPass();

    // Contains objects that are created per frame and need to be deleted after the frame is rendered
    class FrameData {
    public:
        std::vector<VkDescriptorSet> uniformDescriptorSets;
        std::vector<VkDescriptorSet> textureDescriptorSets;
        std::vector<VkDescriptorSet> storageDescriptorSets;
        VkDescriptorPool _descriptorPool;
        std::vector<std::shared_ptr<gpu::Buffer>> _buffers;
        std::vector<VkRenderPass> _renderPasses; // VKTODO: add a lock? It depends on how we do transfer thread

        std::shared_ptr<gpu::Buffer> _objectBuffer;
        std::shared_ptr<gpu::Buffer> _cameraBuffer;
        std::shared_ptr<gpu::Buffer> _drawCallInfoBuffer;

        std::shared_ptr<gpu::Buffer> _glUniformBuffer; // Contains data from glUniform... calls
        std::vector<uint8_t> _glUniformData;
        // Presumably maps a command index to the offset of its data inside
        // _glUniformData — confirm in addGlUniform().
        std::unordered_map<int, size_t> _glUniformOffsetMap;
        size_t _glUniformBufferPosition {0}; // Position where data from next glUniform... call is placed

        // Appends 'size' bytes of uniform data recorded for the command at
        // 'commandIndex'.
        void addGlUniform(size_t size, const void *data, size_t commandIndex);

        FrameData(VKBackend *backend);
        FrameData() = delete;
        ~FrameData();
        // Executed after the frame was rendered so that it can be reused
        void cleanup(); // VKTODO
    private:
        // Creates descriptor pool for current frame
        void createDescriptorPool();
        VKBackend *_backend;
    };

private:
    void draw(VkPrimitiveTopology mode, uint32 numVertices, uint32 startVertex);
    void renderPassTransfer(const Batch& batch);
    void renderPassDraw(const Batch& batch);
    void transferGlUniforms();
    void updateInput();
    void updateTransform(const Batch& batch);
    void updatePipeline();

    // Return the backend-side object mirroring the given gpu resource
    // (tracked in the _framebuffers/_buffers/_textures/_queries sets below).
    vk::VKFramebuffer* syncGPUObject(const Framebuffer *framebuffer);
    VKBuffer* syncGPUObject(const Buffer *buffer);
    VKTexture* syncGPUObject(const Texture *texture);
    VKQuery* syncGPUObject(const Query *query);

    void blitToFramebuffer(VKAttachmentTexture &input, const Vec4i& srcViewport, VKAttachmentTexture &output, const Vec4i& dstViewport);

public:
    VKBackend();
    ~VKBackend();
    void shutdown() override;
    vks::Context& getContext() { return _context; }
    void syncProgram(const gpu::ShaderPointer& program) override {}
    void syncCache() override {}
    void recycle() const override {}
    void updatePresentFrame(const Mat4& correction = Mat4(), bool primary = true) override;
    void executeFrame(const FramePointer& frame) final;
    void render(const Batch& batch) final;
    bool isTextureManagementSparseEnabled() const override;
    bool supportedTextureFormat(const gpu::Element& format) const override;
    const std::string& getVersion() const override;
    void downloadFramebuffer(const FramebufferPointer& srcFramebuffer, const Vec4i& region, QImage& destImage) final;
    void setDrawCommandBuffer(VkCommandBuffer commandBuffer);
    // NOTE(review): returns the bitset capacity (MAX_NUM_INPUT_BUFFERS), not
    // the number of currently bound buffers — confirm this is intended.
    size_t getNumInputBuffers() const { return _input._invalidBuffers.size(); }
    VkDescriptorImageInfo getDefaultTextureDescriptorInfo();
    // Used by GPU frame player to move camera around
    void enableContextViewCorrectionForFramePlayer() { _transform._viewCorrectionEnabledForFramePlayer = true; };
    void setIsFramePlayer(bool isFramePlayer) { _isFramePlayer = isFramePlayer; };

    static gpu::Primitive getPrimitiveTopologyFromCommand(Batch::Command command, const Batch& batch, size_t paramOffset);

    int getRealUniformLocation(int location);

    // Store glUniform* data recorded in the batch so it can be uploaded into
    // the per-frame uniform buffer (see FrameData::addGlUniform).
    virtual void store_glUniform1i(const Batch& batch, size_t paramOffset) final;
    virtual void store_glUniform1f(const Batch& batch, size_t paramOffset) final;
    virtual void store_glUniform2f(const Batch& batch, size_t paramOffset) final;
    virtual void store_glUniform3f(const Batch& batch, size_t paramOffset) final;
    virtual void store_glUniform4f(const Batch& batch, size_t paramOffset) final;
    virtual void store_glUniform3fv(const Batch& batch, size_t paramOffset) final;
    virtual void store_glUniform4fv(const Batch& batch, size_t paramOffset) final;
    virtual void store_glUniform4iv(const Batch& batch, size_t paramOffset) final;
    virtual void store_glUniformMatrix3fv(const Batch& batch, size_t paramOffset) final;
    virtual void store_glUniformMatrix4fv(const Batch& batch, size_t paramOffset) final;

    // Draw Stage
    virtual void do_draw(const Batch& batch, size_t paramOffset) final;
    virtual void do_drawIndexed(const Batch& batch, size_t paramOffset) final;
    virtual void do_drawInstanced(const Batch& batch, size_t paramOffset) final;
    virtual void do_drawIndexedInstanced(const Batch& batch, size_t paramOffset) final;
    virtual void do_multiDrawIndirect(const Batch& batch, size_t paramOffset) final;
    virtual void do_multiDrawIndexedIndirect(const Batch& batch, size_t paramOffset) final;

    // Input Stage
    virtual void do_setInputFormat(const Batch& batch, size_t paramOffset) final;
    virtual void do_setInputBuffer(const Batch& batch, size_t paramOffset) final;
    virtual void do_setIndexBuffer(const Batch& batch, size_t paramOffset) final;
    virtual void do_setIndirectBuffer(const Batch& batch, size_t paramOffset) final;
    virtual void do_generateTextureMips(const Batch& batch, size_t paramOffset) final;
    virtual void do_generateTextureMipsWithPipeline(const Batch& batch, size_t paramOffset) final;

    virtual void do_glUniform1i(const Batch& batch, size_t paramOffset) final;
    virtual void do_glUniform1f(const Batch& batch, size_t paramOffset) final;
    virtual void do_glUniform2f(const Batch& batch, size_t paramOffset) final;
    virtual void do_glUniform3f(const Batch& batch, size_t paramOffset) final;
    virtual void do_glUniform4f(const Batch& batch, size_t paramOffset) final;
    virtual void do_glUniform3fv(const Batch& batch, size_t paramOffset) final;
    virtual void do_glUniform4fv(const Batch& batch, size_t paramOffset) final;
    virtual void do_glUniform4iv(const Batch& batch, size_t paramOffset) final;
    virtual void do_glUniformMatrix3fv(const Batch& batch, size_t paramOffset) final;
    virtual void do_glUniformMatrix4fv(const Batch& batch, size_t paramOffset) final;

    // Transform Stage
    virtual void do_setModelTransform(const Batch& batch, size_t paramOffset) final;
    virtual void do_setViewTransform(const Batch& batch, size_t paramOffset) final;
    virtual void do_setProjectionTransform(const Batch& batch, size_t paramOffset) final;
    virtual void do_setProjectionJitterEnabled(const Batch& batch, size_t paramOffset) final;
    virtual void do_setProjectionJitterSequence(const Batch& batch, size_t paramOffset) final;
    virtual void do_setProjectionJitterScale(const Batch& batch, size_t paramOffset) final;

    virtual void do_setViewportTransform(const Batch& batch, size_t paramOffset) final;
    virtual void do_setDepthRangeTransform(const Batch& batch, size_t paramOffset) final;
    virtual void do_saveViewProjectionTransform(const Batch& batch, size_t paramOffset) final;
    virtual void do_setSavedViewProjectionTransform(const Batch& batch, size_t paramOffset) final;
    virtual void do_copySavedViewProjectionTransformToBuffer(const Batch& batch, size_t paramOffset) final;

    // Uniform Stage
    virtual void do_setUniformBuffer(const Batch& batch, size_t paramOffset) final;

    // Resource Stage
    virtual void do_setResourceBuffer(const Batch& batch, size_t paramOffset) final;
    virtual void do_setResourceTexture(const Batch& batch, size_t paramOffset) final;
    virtual void do_setResourceTextureTable(const Batch& batch, size_t paramOffset) final;
    virtual void do_setResourceFramebufferSwapChainTexture(const Batch& batch, size_t paramOffset) final;

    // Pipeline Stage
    virtual void do_setPipeline(const Batch& batch, size_t paramOffset) final;

    // Output stage
    virtual void do_setFramebuffer(const Batch& batch, size_t paramOffset) final;
    virtual void do_setFramebufferSwapChain(const Batch& batch, size_t paramOffset) final;
    virtual void do_clearFramebuffer(const Batch& batch, size_t paramOffset) final;
    virtual void do_blit(const Batch& batch, size_t paramOffset) final;
    virtual void do_advance(const Batch& batch, size_t paramOffset) final;
    virtual void do_setStateBlendFactor(const Batch& batch, size_t paramOffset) final;
    virtual void do_setStateScissorRect(const Batch& batch, size_t paramOffset) final;

    // Query section
    virtual void do_beginQuery(const Batch& batch, size_t paramOffset) final;
    virtual void do_endQuery(const Batch& batch, size_t paramOffset) final;
    virtual void do_getQuery(const Batch& batch, size_t paramOffset) final;

    // Reset stages
    virtual void do_resetStages(const Batch& batch, size_t paramOffset) final;

    virtual void do_disableContextViewCorrection(const Batch& batch, size_t paramOffset) final;
    virtual void do_restoreContextViewCorrection(const Batch& batch, size_t paramOffset) final;
    virtual void do_setContextMirrorViewCorrection(const Batch& batch, size_t paramOffset) final;

    virtual void do_disableContextStereo(const Batch& batch, size_t paramOffset) final;
    virtual void do_restoreContextStereo(const Batch& batch, size_t paramOffset) final;

    // Other
    virtual void do_runLambda(const Batch& batch, size_t paramOffset) final;
    virtual void do_startNamedCall(const Batch& batch, size_t paramOffset) final;
    virtual void do_stopNamedCall(const Batch& batch, size_t paramOffset) final;

    // Performance profiling markers
    virtual void do_pushProfileRange(const Batch& batch, size_t paramOffset) final;
    virtual void do_popProfileRange(const Batch& batch, size_t paramOffset) final;

protected:
    // Initializes parts of the backend that can't be initialized in the constructor.
    void initBeforeFirstFrame();

    void initTransform();
    void initDefaultTexture();

    // Gets a frame data object from the pool and sets _currentFrame to point to it.
    // Needs to be called before frame command buffers creation starts
    void acquireFrameData();
    // Called after frame command buffers are generated.
    // Pointer needs to be kept until rendering finished.
    void releaseFrameData() { _currentFrame.reset(); };
public:
    // Called after frame finishes rendering. Cleans up and puts frame data object back to the pool.
    void recyclePreviousFrame();
    void waitForGPU();

    // Hands an externally owned GL texture id back to its recycler
    // (queued in _externalTexturesTrash below).
    void releaseExternalTexture(GLuint id, const Texture::ExternalRecycler& recycler);

    // VKTODO: quick hack
    VKFramebuffer *_outputTexture{ nullptr };
protected:
    void transitionInputImageLayouts(); // This can be called only from `updateRenderPass`
    void transitionAttachmentImageLayouts(gpu::Framebuffer &framebuffer); // This can be called only from `updateRenderPass`

    // These are filled by syncGPUObject() calls, and are needed to track backend objects so that they can be destroyed before
    // destroying backend.
    // Access to these objects happens only from the backend thread. Destructors don't access them directly, but through a recycler.
    std::unordered_set<VKFramebuffer*> _framebuffers;
    std::unordered_set<VKBuffer*> _buffers;
    std::unordered_set<VKTexture*> _textures;
    std::unordered_set<VKQuery*> _queries;
    void perFrameCleanup();
    // Called by the destructor
    void beforeShutdownCleanup();
    void dumpVmaMemoryStats();

    std::mutex _externalTexturesMutex;
    std::list<std::pair<GLuint, Texture::ExternalRecycler>> _externalTexturesTrash;

    // VkPipeline cache object used when building pipelines.
    // NOTE(review): a "Logical device" comment was attached here but no
    // device member exists in this class — presumably it lives in
    // vks::Context; confirm and drop the stale note.
    VkPipelineCache _pipelineCache;

    vks::Context& _context{ vks::Context::get() };
    std::shared_ptr<gpu::Texture> _defaultTexture;
    VKTexture* _defaultTextureVk{ nullptr };
    VkDescriptorImageInfo _defaultTextureImageInfo{};
    std::shared_ptr<gpu::Texture> _defaultSkyboxTexture;
    VKTexture* _defaultSkyboxTextureVk{ nullptr };
    VkDescriptorImageInfo _defaultSkyboxTextureImageInfo{};
    friend class VKBuffer;
    friend class VKFramebuffer;
    VkCommandBuffer _currentCommandBuffer;
    size_t _commandIndex{ 0 };
    int _currentDraw{ -1 };
    bool _inRenderTransferPass{ false };
    // VKTODO: maybe move to _transform?
    Vec4i _currentScissorRect{ 0 };
    // This allows for one frame to be rendered while commands are generated for next one already
    std::vector<std::shared_ptr<FrameData>> _framePool;
    std::deque<std::shared_ptr<FrameData>> _framesToReuse;
    // Frame for which commands are currently generated
    std::shared_ptr<FrameData> _currentFrame;
    // Frame for which command buffer is already generated and it's currently being rendered.
    std::shared_ptr<FrameData> _currentlyRenderedFrame;
    // Frame that was previously rendered. Can be recycled after waiting for _vkWindow->_previousFrameFence.
    std::shared_ptr<FrameData> _previouslyRenderedFrame;
    size_t _frameCounter{ 0 };

    // Safety check to ensure that shutdown was completed before destruction.
    std::atomic<bool> isBackendShutdownComplete{ false };

    // Dispatch table: maps Batch::Command opcodes to member-function handlers.
    typedef void (VKBackend::*CommandCall)(const Batch&, size_t);
    static std::array<VKBackend::CommandCall, Batch::NUM_COMMANDS> _commandCalls;
    static const size_t INVALID_OFFSET = (size_t)-1;
    static size_t UNIFORM_BUFFER_OFFSET_ALIGNMENT;
    bool _isFramePlayer {false};
    bool _isInitialized {false};
};
554 
}} // namespace gpu::vk
556 
557 #endif
// (Documentation-site tooltip residue, kept as comments so the header stays valid C++:)
// Provides the Mat4 scripting interface. — Definition: Mat4.h:44
// A simple object wrapper for an OpenGL texture. — Definition: material-networking/src/material-networking/TextureCache.h:39