# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
import abc
from copy import deepcopy
from typing import Any, Callable, Dict, Iterator, Optional, Union
import numpy as np
import torch
import torch.nn as nn
from tensordict.tensordict import TensorDictBase
from torchrl._utils import prod, seed_generator
from torchrl.data.tensor_specs import (
CompositeSpec,
DiscreteTensorSpec,
TensorSpec,
UnboundedContinuousTensorSpec,
)
from torchrl.data.utils import DEVICE_TYPING
from torchrl.envs.utils import get_available_libraries, step_mdp
LIBRARIES = get_available_libraries()
def _tensor_to_np(t):
return t.detach().cpu().numpy()
dtype_map = {
torch.float: np.float32,
torch.double: np.float64,
torch.bool: bool,
}
class EnvMetaData:
"""A class for environment meta-data storage and passing in multiprocessed settings."""
def __init__(
self,
tensordict: TensorDictBase,
specs: CompositeSpec,
batch_size: torch.Size,
env_str: str,
device: torch.device,
batch_locked: bool = True,
):
self.device = device
self.tensordict = tensordict
self.specs = specs
self.batch_size = batch_size
self.env_str = env_str
self.batch_locked = batch_locked
@property
def tensordict(self):
return self._tensordict.to(self.device)
@property
def specs(self):
return self._specs.to(self.device)
@tensordict.setter
def tensordict(self, value: TensorDictBase):
self._tensordict = value.to("cpu")
@specs.setter
def specs(self, value: CompositeSpec):
self._specs = value.to("cpu")
@staticmethod
def metadata_from_env(env) -> EnvMetaData:
tensordict = env.fake_tensordict().clone()
tensordict.set("_reset", torch.zeros_like(tensordict.get(env.done_key)))
        specs = env.specs.to("cpu")
        batch_size = env.batch_size
        env_str = str(env)
        device = env.device
        batch_locked = env.batch_locked
return EnvMetaData(tensordict, specs, batch_size, env_str, device, batch_locked)
def expand(self, *size: int) -> EnvMetaData:
tensordict = self.tensordict.expand(*size).to_tensordict()
batch_size = torch.Size(list(size))
return EnvMetaData(
tensordict,
self.specs.expand(*size),
batch_size,
self.env_str,
self.device,
self.batch_locked,
)
def clone(self):
return EnvMetaData(
self.tensordict.clone(),
self.specs.clone(),
torch.Size([*self.batch_size]),
deepcopy(self.env_str),
self.device,
self.batch_locked,
)
def to(self, device: DEVICE_TYPING) -> EnvMetaData:
tensordict = self.tensordict.contiguous().to(device)
specs = self.specs.to(device)
return EnvMetaData(
tensordict, specs, self.batch_size, self.env_str, device, self.batch_locked
)
class EnvBase(nn.Module, metaclass=abc.ABCMeta):
"""Abstract environment parent class.
Properties:
observation_spec (CompositeSpec): sampling spec of the observations. Must be a
:class:`torchrl.data.CompositeSpec` instance. The keys listed in the
spec are directly accessible after reset.
            In TorchRL, even though they are not "observations" in the strict
            sense, all info, states, results of transforms, etc. are stored in
            the ``observation_spec``. Therefore, ``observation_spec`` should be
            thought of as a generic data container for environment outputs
            that are not done or reward data.
reward_spec (TensorSpec): the (leaf) spec of the reward. If the reward
is nested within a tensordict, its location can be accessed via
the ``reward_key`` attribute:
>>> # accessing reward spec:
>>> reward_spec = env.reward_spec
>>> reward_spec = env.output_spec['_reward_spec'][env.reward_key]
>>> # accessing reward:
>>> reward = env.fake_tensordict()[('next', *env.reward_key)]
done_spec (TensorSpec): the (leaf) spec of the done. If the done
is nested within a tensordict, its location can be accessed via
the ``done_key`` attribute.
>>> # accessing done spec:
>>> done_spec = env.done_spec
>>> done_spec = env.output_spec['_done_spec'][env.done_key]
>>> # accessing done:
>>> done = env.fake_tensordict()[('next', *env.done_key)]
        action_spec (TensorSpec): the sampling spec of the actions. This attribute
is contained in input_spec.
>>> # accessing action spec:
>>> action_spec = env.action_spec
>>> action_spec = env.input_spec['_action_spec'][env.action_key]
>>> # accessing action:
>>> action = env.fake_tensordict()[env.action_key]
output_spec (CompositeSpec): The container for all output specs (reward,
done and observation).
input_spec (CompositeSpec): the container for all input specs (actions
and possibly others).
        batch_size (torch.Size): number of environments contained in the instance.
        device (torch.device): device where the env inputs and outputs are expected to live.
run_type_checks (bool): if ``True``, the observation and reward dtypes
will be compared against their respective spec and an exception
will be raised if they don't match.
Defaults to False.
.. note::
The usage of ``done_key``, ``reward_key`` and ``action_key`` is aimed at
facilitating the custom placement of done, reward and action data within
the tensordict structures produced and read by the environment.
In most cases, these attributes can be ignored and the default values
(``"done"``, ``"reward"`` and ``"action"``) can be used.
Methods:
step (TensorDictBase -> TensorDictBase): step in the environment
reset (TensorDictBase, optional -> TensorDictBase): reset the environment
set_seed (int -> int): sets the seed of the environment
rand_step (TensorDictBase, optional -> TensorDictBase): random step given the action spec
rollout (Callable, ... -> TensorDictBase): executes a rollout in the environment with the given policy (or random
steps if no policy is provided)
Examples:
>>> from torchrl.envs.libs.gym import GymEnv
>>> env = GymEnv("Pendulum-v1")
>>> env.batch_size # how many envs are run at once
torch.Size([])
>>> env.input_spec
CompositeSpec(
action: BoundedTensorSpec(
shape=torch.Size([1]),
space=ContinuousBox(
minimum=Tensor(shape=torch.Size([1]), device=cpu, dtype=torch.float32, contiguous=True),
maximum=Tensor(shape=torch.Size([1]), device=cpu, dtype=torch.float32, contiguous=True)),
device=cpu,
dtype=torch.float32,
domain=continuous), device=cpu, shape=torch.Size([]))
>>> env.action_spec
BoundedTensorSpec(
shape=torch.Size([1]),
space=ContinuousBox(
minimum=Tensor(shape=torch.Size([1]), device=cpu, dtype=torch.float32, contiguous=True),
maximum=Tensor(shape=torch.Size([1]), device=cpu, dtype=torch.float32, contiguous=True)),
device=cpu,
dtype=torch.float32,
domain=continuous)
>>> env.observation_spec
CompositeSpec(
observation: BoundedTensorSpec(
shape=torch.Size([3]),
space=ContinuousBox(
minimum=Tensor(shape=torch.Size([3]), device=cpu, dtype=torch.float32, contiguous=True),
maximum=Tensor(shape=torch.Size([3]), device=cpu, dtype=torch.float32, contiguous=True)),
device=cpu,
dtype=torch.float32,
domain=continuous), device=cpu, shape=torch.Size([]))
>>> env.reward_spec
UnboundedContinuousTensorSpec(
shape=torch.Size([1]),
space=None,
device=cpu,
dtype=torch.float32,
domain=continuous)
>>> env.done_spec
DiscreteTensorSpec(
shape=torch.Size([1]),
space=DiscreteBox(n=2),
device=cpu,
dtype=torch.bool,
domain=discrete)
>>> # the output_spec contains all the expected outputs
>>> env.output_spec
CompositeSpec(
observation: CompositeSpec(
observation: BoundedTensorSpec(
shape=torch.Size([3]),
space=ContinuousBox(
minimum=Tensor(shape=torch.Size([3]), device=cpu, dtype=torch.float32, contiguous=True),
maximum=Tensor(shape=torch.Size([3]), device=cpu, dtype=torch.float32, contiguous=True)),
device=cpu,
dtype=torch.float32,
domain=continuous), device=cpu, shape=torch.Size([])),
reward: CompositeSpec(
reward: UnboundedContinuousTensorSpec(
shape=torch.Size([1]),
space=None,
device=cpu,
dtype=torch.float32,
domain=continuous), device=cpu, shape=torch.Size([])),
done: CompositeSpec(
done: DiscreteTensorSpec(
shape=torch.Size([1]),
space=DiscreteBox(n=2),
device=cpu,
dtype=torch.bool,
domain=discrete), device=cpu, shape=torch.Size([])), device=cpu, shape=torch.Size([]))
"""
def __init__(
self,
device: DEVICE_TYPING = "cpu",
dtype: Optional[Union[torch.dtype, np.dtype]] = None,
batch_size: Optional[torch.Size] = None,
run_type_checks: bool = False,
):
self.__dict__["_done_key"] = None
self.__dict__["_reward_key"] = None
self.__dict__["_action_key"] = None
if device is not None:
self.__dict__["_device"] = torch.device(device)
output_spec = self.__dict__.get("_output_spec", None)
if output_spec is not None:
self.__dict__["_output_spec"] = output_spec.to(self.device)
input_spec = self.__dict__.get("_input_spec", None)
if input_spec is not None:
self.__dict__["_input_spec"] = input_spec.to(self.device)
super().__init__()
self.dtype = dtype_map.get(dtype, dtype)
if "is_closed" not in self.__dir__():
self.is_closed = True
if batch_size is not None:
# we want an error to be raised if we pass batch_size but
# it's already been set
self.batch_size = torch.Size(batch_size)
self._run_type_checks = run_type_checks
@classmethod
def __new__(cls, *args, _inplace_update=False, _batch_locked=True, **kwargs):
# inplace update will write tensors in-place on the provided tensordict.
# This is risky, especially if gradients need to be passed (in-place copy
# for tensors that are part of computational graphs will result in an error).
# It can also lead to inconsistencies when calling rollout.
cls._inplace_update = _inplace_update
cls._batch_locked = _batch_locked
cls._device = None
# cached in_keys to be excluded from update when calling step
cls._cache_in_keys = None
        # _input_spec may have been assigned to the cls, but it must live on
        # the instance: we pull it off the class and put it back where it belongs.
_input_spec = None
if hasattr(cls, "_input_spec"):
_input_spec = cls._input_spec.clone()
delattr(cls, "_input_spec")
_output_spec = None
if hasattr(cls, "_output_spec"):
_output_spec = cls._output_spec.clone()
delattr(cls, "_output_spec")
env = super().__new__(cls)
if _input_spec is not None:
env.__dict__["_input_spec"] = _input_spec
if _output_spec is not None:
env.__dict__["_output_spec"] = _output_spec
return env
def __setattr__(self, key, value):
if key in ("_input_spec", "_observation_spec", "_action_spec", "_reward_spec"):
raise AttributeError(
"To set an environment spec, please use `env.observation_spec = obs_spec` (without the leading"
" underscore)."
)
return super().__setattr__(key, value)
@property
def batch_locked(self) -> bool:
"""Whether the environnement can be used with a batch size different from the one it was initialized with or not.
If True, the env needs to be used with a tensordict having the same batch size as the env.
batch_locked is an immutable property.
"""
return self._batch_locked
@batch_locked.setter
def batch_locked(self, value: bool) -> None:
raise RuntimeError("batch_locked is a read-only property")
@property
def run_type_checks(self) -> bool:
return self._run_type_checks
@run_type_checks.setter
def run_type_checks(self, run_type_checks: bool) -> None:
self._run_type_checks = run_type_checks
@property
def batch_size(self) -> torch.Size:
_batch_size = getattr(self, "_batch_size", None)
if _batch_size is None:
_batch_size = self._batch_size = torch.Size([])
return _batch_size
@batch_size.setter
def batch_size(self, value: torch.Size) -> None:
self._batch_size = torch.Size(value)
if (
hasattr(self, "output_spec")
and self.output_spec.shape[: len(value)] != value
):
self.output_spec.unlock_()
self.output_spec.shape = value
self.output_spec.lock_()
if hasattr(self, "input_spec") and self.input_spec.shape[: len(value)] != value:
self.input_spec.unlock_()
self.input_spec.shape = value
self.input_spec.lock_()
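    # Sketch: assigning `env.batch_size = torch.Size([4])` propagates the new
    # leading shape to both output_spec and input_spec, as done above.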
@property
def device(self) -> torch.device:
device = self.__dict__.get("_device", None)
if device is None:
device = self.__dict__["_device"] = torch.device("cpu")
return device
@device.setter
def device(self, value: torch.device) -> None:
device = self.__dict__.get("_device", None)
if device is None:
self.__dict__["_device"] = value
return
raise RuntimeError("device cannot be set. Call env.to(device) instead.")
def ndimension(self):
return len(self.batch_size)
@property
def ndim(self):
return self.ndimension()
# Parent specs: input and output spec.
@property
def input_spec(self) -> TensorSpec:
input_spec = self.__dict__.get("_input_spec", None)
if input_spec is None:
input_spec = CompositeSpec(
_state_spec=None,
shape=self.batch_size,
device=self.device,
).lock_()
self.__dict__["_input_spec"] = input_spec
return input_spec
@input_spec.setter
def input_spec(self, value: TensorSpec) -> None:
raise RuntimeError("input_spec is protected.")
@property
def output_spec(self) -> TensorSpec:
output_spec = self.__dict__.get("_output_spec", None)
if output_spec is None:
output_spec = CompositeSpec(
shape=self.batch_size,
device=self.device,
).lock_()
self.__dict__["_output_spec"] = output_spec
return output_spec
@output_spec.setter
def output_spec(self, value: TensorSpec) -> None:
raise RuntimeError("output_spec is protected.")
# Action spec
def _get_action_key(self):
keys = self.input_spec["_action_spec"].keys(True, True)
for key in keys:
# the first key is the action
if not isinstance(key, tuple):
key = (key,)
break
else:
raise AttributeError("Could not find action spec")
self.__dict__["_action_key"] = key
return key
@property
def action_key(self):
"""The action key of an environment.
        By default, non-nested keys are stored in the ``'action'`` entry.
If the action is in a nested tensordict, this property will return its
location.
"""
out = self._action_key
if out is None:
out = self._get_action_key()
return out
# Action spec: action specs belong to input_spec
@property
def action_spec(self) -> TensorSpec:
"""The ``action`` leaf spec.
This property will always return the leaf spec of the action attribute,
which can be accessed in a typical rollout via
>>> fake_td = env.fake_tensordict() # a typical tensordict
>>> action = fake_td[env.action_key]
This property is mutable.
"""
try:
action_spec = self.input_spec["_action_spec"]
except (KeyError, AttributeError):
raise KeyError("Failed to find the action_spec.")
try:
out = action_spec[self.action_key]
except KeyError:
# the key may have changed
raise KeyError(
"The action_key attribute seems to have changed. "
"This occurs when a action_spec is updated without "
"calling `env.action_spec = new_spec`. "
"Make sure you rely on this type of command "
"to set the action and other specs."
)
return out
@action_spec.setter
def action_spec(self, value: TensorSpec) -> None:
try:
self.input_spec.unlock_()
device = self.input_spec.device
try:
delattr(self, "_action_key")
except AttributeError:
pass
if isinstance(value, CompositeSpec):
for _ in value.values(True, True): # noqa: B007
break
else:
raise RuntimeError(
"An empty CompositeSpec was passed for the action spec. "
"This is currently not permitted."
)
else:
value = CompositeSpec(
action=value.to(device), shape=self.batch_size, device=device
)
self.input_spec["_action_spec"] = value.to(device)
self._get_action_key()
finally:
self.input_spec.lock_()
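    # Hedged setter sketch (`BoundedTensorSpec` shown for illustration): a
    # non-composite spec is wrapped in a CompositeSpec under the "action"
    # entry, as done above.
    #   >>> from torchrl.data import BoundedTensorSpec
    #   >>> env.action_spec = BoundedTensorSpec(minimum=-1, maximum=1, shape=(1,))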
# Reward spec
def _get_reward_key(self):
keys = self.output_spec["_reward_spec"].keys(True, True)
for key in keys:
# the first key is the reward
if not isinstance(key, tuple):
key = (key,)
break
else:
raise AttributeError("Could not find reward spec")
self.__dict__["_reward_key"] = key
return key
@property
def reward_key(self):
"""The reward key of an environment.
By default, non-nested keys are stored in the ``'reward'`` entry.
If the reward is in a nested tensordict, this property will return its
location.
"""
out = self._reward_key
if out is None:
out = self._get_reward_key()
return out
    # Reward spec: reward specs belong to output_spec
@property
def reward_spec(self) -> TensorSpec:
"""The ``reward`` leaf spec.
This property will always return the leaf spec of the reward attribute,
which can be accessed in a typical rollout via
>>> fake_td = env.fake_tensordict() # a typical tensordict
>>> reward = fake_td[("next", *env.reward_key)]
This property is mutable.
"""
try:
reward_spec = self.output_spec["_reward_spec"]
except (KeyError, AttributeError):
# populate the "reward" entry
# this will be raised if there is not _reward_spec (unlikely) or no reward_key
# Since output_spec is lazily populated with an empty composite spec for
# reward_spec, the second case is much more likely to occur.
self.reward_spec = out = UnboundedContinuousTensorSpec(
shape=(*self.batch_size, 1),
device=self.device,
)
reward_spec = self.output_spec["_reward_spec"]
finally:
try:
out = reward_spec[self.reward_key]
except KeyError:
# the key may have changed
raise KeyError(
"The reward_key attribute seems to have changed. "
"This occurs when a reward_spec is updated without "
"calling `env.reward_spec = new_spec`. "
"Make sure you rely on this type of command "
"to set the reward and other specs."
)
return out
@reward_spec.setter
def reward_spec(self, value: TensorSpec) -> None:
try:
self.output_spec.unlock_()
device = self.output_spec.device
try:
delattr(self, "_reward_key")
except AttributeError:
pass
if not hasattr(value, "shape"):
raise TypeError(
f"reward_spec of type {type(value)} do not have a shape "
f"attribute."
)
if value.shape[: len(self.batch_size)] != self.batch_size:
raise ValueError(
"The value of spec.shape must match the env batch size."
)
if isinstance(value, CompositeSpec):
for nestedval in value.values(True, True): # noqa: B007
break
else:
raise RuntimeError(
"An empty CompositeSpec was passed for the reward spec. "
"This is currently not permitted."
)
else:
nestedval = value
value = CompositeSpec(
reward=value.to(device), shape=self.batch_size, device=device
)
if len(nestedval.shape) == 0:
raise RuntimeError(
"the reward_spec shape cannot be empty (this error"
" usually comes from trying to set a reward_spec"
" with a null number of dimensions. Try using a multidimensional"
" spec instead, for instance with a singleton dimension at the tail)."
)
self.output_spec["_reward_spec"] = value.to(device)
self._get_reward_key()
finally:
self.output_spec.lock_()
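    # Hedged setter sketch: the leaf spec must keep at least one trailing
    # dimension, as enforced above.
    #   >>> env.reward_spec = UnboundedContinuousTensorSpec(shape=(*env.batch_size, 1))
    # A spec with shape=env.batch_size would be rejected for a batch-less env,
    # since its shape would then be empty.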
# done spec
def _get_done_key(self):
keys = self.output_spec["_done_spec"].keys(True, True)
for key in keys:
            # the first key is the done key
if not isinstance(key, tuple):
key = (key,)
break
else:
raise AttributeError(
f"Could not find done spec: {self.output_spec['_done_spec']}"
)
self.__dict__["_done_key"] = key
return key
@property
def done_key(self):
"""The done key of an environment.
By default, non-nested keys are stored in the ``'done'`` entry.
If the done is in a nested tensordict, this property will return its
location.
"""
out = self._done_key
if out is None:
out = self._get_done_key()
return out
# Done spec: done specs belong to output_spec
@property
def done_spec(self) -> TensorSpec:
"""The ``done`` leaf spec.
This property will always return the leaf spec of the done attribute,
which can be accessed in a typical rollout via
>>> fake_td = env.fake_tensordict() # a typical tensordict
>>> done = fake_td[("next", *env.done_key)]
This property is mutable.
"""
try:
done_spec = self.output_spec["_done_spec"]
except (KeyError, AttributeError):
# populate the "done" entry
            # this will be raised if there is no _done_spec (unlikely) or no done_key
# Since output_spec is lazily populated with an empty composite spec for
# done_spec, the second case is much more likely to occur.
self.done_spec = DiscreteTensorSpec(
n=2, shape=(*self.batch_size, 1), dtype=torch.bool, device=self.device
)
done_spec = self.output_spec["_done_spec"]
finally:
try:
out = done_spec[self.done_key]
except KeyError:
# the key may have changed
raise KeyError(
"The done_key attribute seems to have changed. "
"This occurs when a done_spec is updated without "
"calling `env.done_spec = new_spec`. "
"Make sure you rely on this type of command "
"to set the done and other specs."
)
return out
@done_spec.setter
def done_spec(self, value: TensorSpec) -> None:
try:
self.output_spec.unlock_()
device = self.output_spec.device
try:
delattr(self, "_done_key")
except AttributeError:
pass
if not hasattr(value, "shape"):
raise TypeError(
f"done_spec of type {type(value)} do not have a shape "
f"attribute."
)
if value.shape[: len(self.batch_size)] != self.batch_size:
raise ValueError(
"The value of spec.shape must match the env batch size."
)
if isinstance(value, CompositeSpec):
for nestedval in value.values(True, True): # noqa: B007
break
else:
raise RuntimeError(
"An empty CompositeSpec was passed for the done spec. "
"This is currently not permitted."
)
else:
nestedval = value
value = CompositeSpec(
done=value.to(device), shape=self.batch_size, device=device
)
if len(nestedval.shape) == 0:
raise RuntimeError(
"the done_spec shape cannot be empty (this error"
" usually comes from trying to set a done_spec"
" with a null number of dimensions. Try using a multidimensional"
" spec instead, for instance with a singleton dimension at the tail)."
)
            if len(list(value.keys())) == 0:
                raise RuntimeError("done_spec cannot be an empty CompositeSpec.")
self.output_spec["_done_spec"] = value.to(device)
self._get_done_key()
finally:
self.output_spec.lock_()
# observation spec: observation specs belong to output_spec
@property
def observation_spec(self) -> CompositeSpec:
observation_spec = self.output_spec["_observation_spec"]
if observation_spec is None:
observation_spec = CompositeSpec(shape=self.batch_size, device=self.device)
self.output_spec.unlock_()
self.output_spec["_observation_spec"] = observation_spec
self.output_spec.lock_()
return observation_spec
@observation_spec.setter
def observation_spec(self, value: TensorSpec) -> None:
try:
self.output_spec.unlock_()
device = self.output_spec.device
            if not isinstance(value, CompositeSpec):
                raise TypeError("The type of an observation_spec must be CompositeSpec.")
            elif value.shape[: len(self.batch_size)] != self.batch_size:
                raise ValueError(
                    f"The value of spec.shape ({value.shape}) must match the env batch size ({self.batch_size})."
                )
self.output_spec["_observation_spec"] = value.to(device)
finally:
self.output_spec.lock_()
# state spec: state specs belong to input_spec
@property
def state_spec(self) -> CompositeSpec:
state_spec = self.input_spec["_state_spec"]
if state_spec is None:
state_spec = CompositeSpec(shape=self.batch_size, device=self.device)
self.input_spec.unlock_()
self.input_spec["_state_spec"] = state_spec
self.input_spec.lock_()
return state_spec
@state_spec.setter
def state_spec(self, value: CompositeSpec) -> None:
try:
self.input_spec.unlock_()
if value is None:
self.input_spec["_state_spec"] = CompositeSpec(
device=self.device, shape=self.batch_size
)
else:
device = self.input_spec.device
                if not isinstance(value, CompositeSpec):
                    raise TypeError("The type of a state_spec must be CompositeSpec.")
                elif value.shape[: len(self.batch_size)] != self.batch_size:
                    raise ValueError(
                        f"The value of spec.shape ({value.shape}) must match the env batch size ({self.batch_size})."
                    )
self.input_spec["_state_spec"] = value.to(device)
finally:
self.input_spec.lock_()
def step(self, tensordict: TensorDictBase) -> TensorDictBase:
"""Makes a step in the environment.
        Step accepts a single argument, tensordict, which usually carries an 'action' key indicating the action
        to be taken.
        Step will call an out-of-place private method, _step, which is the method to be re-written by EnvBase subclasses.
Args:
tensordict (TensorDictBase): Tensordict containing the action to be taken.
Returns:
the input tensordict, modified in place with the resulting observations, done state and reward
(+ others if needed).
"""
# sanity check
self._assert_tensordict_shape(tensordict)
tensordict_out = self._step(tensordict)
# this tensordict should contain a "next" key
try:
next_tensordict_out = tensordict_out.get("next")
except KeyError:
raise RuntimeError(
"The value returned by env._step must be a tensordict where the "
"values at t+1 have been written under a 'next' entry. This "
f"tensordict couldn't be found in the output, got: {tensordict_out}."
)
if tensordict_out is tensordict:
raise RuntimeError(
"EnvBase._step should return outplace changes to the input "
"tensordict. Consider emptying the TensorDict first (e.g. tensordict.empty() or "
"tensordict.select()) inside _step before writing new tensors onto this new instance."
)
# TODO: Refactor this using reward spec
reward = next_tensordict_out.get(self.reward_key)
# unsqueeze rewards if needed
# the input tensordict may have more leading dimensions than the batch_size
# e.g. in model-based contexts.
batch_size = self.batch_size
dims = len(batch_size)
leading_batch_size = (
next_tensordict_out.batch_size[:-dims]
if dims
else next_tensordict_out.shape
)
expected_reward_shape = torch.Size(
[*leading_batch_size, *self.reward_spec.shape]
)
actual_reward_shape = reward.shape
if actual_reward_shape != expected_reward_shape:
reward = reward.view(expected_reward_shape)
next_tensordict_out.set(self.reward_key, reward)
# TODO: Refactor this using done spec
done = next_tensordict_out.get(self.done_key)
# unsqueeze done if needed
expected_done_shape = torch.Size([*leading_batch_size, *self.done_spec.shape])
actual_done_shape = done.shape
if actual_done_shape != expected_done_shape:
done = done.view(expected_done_shape)
next_tensordict_out.set(self.done_key, done)
tensordict_out.set("next", next_tensordict_out)
if self.run_type_checks:
for key in self._select_observation_keys(tensordict_out):
obs = tensordict_out.get(key)
self.observation_spec.type_check(obs, key)
if (
next_tensordict_out.get(self.reward_key).dtype
is not self.reward_spec.dtype
):
raise TypeError(
f"expected reward.dtype to be {self.reward_spec.dtype} "
f"but got {tensordict_out.get(self.reward_key).dtype}"
)
if next_tensordict_out.get(self.done_key).dtype is not self.done_spec.dtype:
raise TypeError(
f"expected done.dtype to be torch.bool but got {tensordict_out.get(self.done_key).dtype}"
)
# tensordict could already have a "next" key
tensordict.update(tensordict_out)
return tensordict
def _get_in_keys_to_exclude(self, tensordict):
if self._cache_in_keys is None:
self._cache_in_keys = list(
set(self.input_spec.keys(True)).intersection(
tensordict.keys(True, True)
)
)
return self._cache_in_keys
def forward(self, tensordict: TensorDictBase) -> TensorDictBase:
raise NotImplementedError("EnvBase.forward is not implemented")
@abc.abstractmethod
def _step(
self,
tensordict: TensorDictBase,
) -> TensorDictBase:
raise NotImplementedError
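    # A minimal `_step` sketch for a subclass (hedged; the observation entry
    # and the compute_* helpers are hypothetical). The returned tensordict
    # must be a new instance carrying a "next" entry, not the input itself:
    #   def _step(self, tensordict):
    #       action = tensordict.get(self.action_key)
    #       next_td = TensorDict(
    #           {
    #               "observation": compute_obs(action),       # hypothetical
    #               self.reward_key: compute_reward(action),  # hypothetical
    #               self.done_key: compute_done(action),      # hypothetical
    #           },
    #           batch_size=self.batch_size,
    #       )
    #       return tensordict.select().set("next", next_td)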
@abc.abstractmethod
def _reset(self, tensordict: TensorDictBase, **kwargs) -> TensorDictBase:
raise NotImplementedError
def reset(
self,
tensordict: Optional[TensorDictBase] = None,
**kwargs,
) -> TensorDictBase:
"""Resets the environment.
        As with step and _step, only the private method :obj:`_reset` should be overwritten by EnvBase subclasses.
Args:
tensordict (TensorDictBase, optional): tensordict to be used to contain the resulting new observation.
                In some cases, this input can also be used to pass arguments to the reset function.
kwargs (optional): other arguments to be passed to the native
reset function.
Returns:
a tensordict (or the input tensordict, if any), modified in place with the resulting observations.
"""
if tensordict is not None and "_reset" in tensordict.keys():
self._assert_tensordict_shape(tensordict)
_reset = tensordict.get("_reset")
if _reset.shape[-len(self.done_spec.shape) :] != self.done_spec.shape:
raise RuntimeError(
"_reset flag in tensordict should follow env.done_spec"
)
else:
_reset = None
tensordict_reset = self._reset(tensordict, **kwargs)
if tensordict_reset.device != self.device:
tensordict_reset = tensordict_reset.to(self.device)
if tensordict_reset is tensordict:
raise RuntimeError(
"EnvBase._reset should return outplace changes to the input "
"tensordict. Consider emptying the TensorDict first (e.g. tensordict.empty() or "
"tensordict.select()) inside _reset before writing new tensors onto this new instance."
)
if not isinstance(tensordict_reset, TensorDictBase):
raise RuntimeError(
f"env._reset returned an object of type {type(tensordict_reset)} but a TensorDict was expected."
)
if len(self.batch_size):
leading_dim = tensordict_reset.shape[: -len(self.batch_size)]
else:
leading_dim = tensordict_reset.shape
if self.done_spec is not None and self.done_key not in tensordict_reset.keys(
True, True
):
tensordict_reset.set(
self.done_key,
self.done_spec.zero(leading_dim),
)
if (_reset is None and tensordict_reset.get(self.done_key).any()) or (
_reset is not None and tensordict_reset.get(self.done_key)[_reset].any()
):
raise RuntimeError(
f"Env {self} was done after reset on specified '_reset' dimensions. This is (currently) not allowed."
)
if tensordict is not None:
tensordict.update(tensordict_reset)
else:
tensordict = tensordict_reset
return tensordict
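    # Partial-reset sketch (hedged; assumes a batched env with batch_size [4],
    # whose default done_spec has shape [4, 1]):
    #   >>> td = env.reset()
    #   >>> td.set("_reset", torch.tensor([[True], [False], [False], [True]]))
    #   >>> td = env.reset(td)  # only the first and last sub-envs are reset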
def numel(self) -> int:
return prod(self.batch_size)
def set_seed(
self, seed: Optional[int] = None, static_seed: bool = False
) -> Optional[int]:
"""Sets the seed of the environment and returns the next seed to be used (which is the input seed if a single environment is present).
Args:
seed (int): seed to be set
static_seed (bool, optional): if ``True``, the seed is not incremented.
Defaults to False
Returns:
            integer representing the "next seed": i.e. the seed that should be
            used for another environment if created concomitantly to this environment.
"""
if seed is not None: