convert : support contiguous method on lora tensors (#20489)

This commit is contained in:
Sigbjørn Skjæret
2026-03-15 12:15:12 +01:00
committed by GitHub
parent b9da4444df
commit 89d0aec042

View File

@@ -128,6 +128,12 @@ class LoraTorchTensor:
assert dim is None assert dim is None
return self.shape return self.shape
def contiguous(self) -> LoraTorchTensor:
    """Return an equivalent tensor whose underlying LoRA factors are contiguous.

    Mirrors ``torch.Tensor.contiguous`` by delegating to the wrapped A and B
    factor tensors and rebuilding the pair, leaving this instance untouched.
    """
    dense_a = self._lora_A.contiguous()
    dense_b = self._lora_B.contiguous()
    return LoraTorchTensor(dense_a, dense_b)
def reshape(self, *shape: int | tuple[int, ...]) -> LoraTorchTensor: def reshape(self, *shape: int | tuple[int, ...]) -> LoraTorchTensor:
if isinstance(shape[0], tuple): if isinstance(shape[0], tuple):
new_shape: tuple[int, ...] = shape[0] new_shape: tuple[int, ...] = shape[0]