Mirror of https://github.com/ggml-org/llama.cpp.git — synced 2026-03-17 08:34:07 +00:00
convert : support contiguous method on lora tensors (#20489)
This commit is contained in the branches/tags listed below.
@@ -128,6 +128,12 @@ class LoraTorchTensor:
|
||||
assert dim is None
|
||||
return self.shape
|
||||
|
||||
def contiguous(self) -> LoraTorchTensor:
    """Return an equivalent LoraTorchTensor whose A and B factors are contiguous.

    Mirrors torch.Tensor.contiguous() by applying it to each low-rank
    factor independently; the logical (composed) tensor is unchanged.
    """
    lora_a = self._lora_A.contiguous()
    lora_b = self._lora_B.contiguous()
    return LoraTorchTensor(lora_a, lora_b)
|
||||
|
||||
def reshape(self, *shape: int | tuple[int, ...]) -> LoraTorchTensor:
|
||||
if isinstance(shape[0], tuple):
|
||||
new_shape: tuple[int, ...] = shape[0]
|
||||
|
||||
Reference in New Issue
Block a user