
Commit

Add files via upload
zzmtsvv authored Sep 14, 2023
1 parent 78a51b8 commit 64297ca
Showing 4 changed files with 448 additions and 0 deletions.
37 changes: 37 additions & 0 deletions xql/config.py
@@ -0,0 +1,37 @@
from dataclasses import dataclass
import torch


@dataclass
class xql_config:
    # Experiment
    device: str = "cuda" if torch.cuda.is_available() else "cpu"
    dataset_name: str = "halfcheetah-medium-v2"
    seed: int = 42

    # observation and action dimensionality of halfcheetah-medium-v2
    state_dim: int = 17
    action_dim: int = 6

    actor_lr: float = 3e-4
    critic_lr: float = 3e-4
    value_func_lr: float = 3e-4
    batch_size: int = 1024
    buffer_size: int = 1000000
    discount: float = 0.99
    hidden_dim: int = 256
    max_action: float = 1.0
    max_timesteps: int = int(1e6)
    tau: float = 5e-3

    value_update_freq: int = 1000
    beta: float = 1.0  # 10.0
    value_noise_std: float = 0.1
    exp_adv_temperature: float = 0.1
    advantage_max: float = 100.0
    critic_delta_loss: float = 20.0

    grad_clip: float = 7.0

    project: str = "XQL"
    group: str = dataset_name
    name: str = dataset_name + "_" + str(seed)
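
A minimal usage sketch, not part of this commit (the xql.config import path is assumed from the file layout), showing how the derived defaults resolve:

from xql.config import xql_config

config = xql_config()
print(config.device)  # "cuda" if a GPU is available, otherwise "cpu"
print(config.group)   # "halfcheetah-medium-v2"
print(config.name)    # "halfcheetah-medium-v2_42"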
164 changes: 164 additions & 0 deletions xql/dataset.py
@@ -0,0 +1,164 @@
import torch
import numpy as np
from typing import List, Tuple
import os


class ReplayBuffer:
    def __init__(self,
                 state_dim: int,
                 action_dim: int,
                 buffer_size: int = 1000000) -> None:

        self.state_dim = state_dim
        self.action_dim = action_dim
        self.buffer_size = buffer_size
        self.pointer = 0
        self.size = 0

        device = "cpu"
        self.device = device

        self.states = torch.zeros((buffer_size, state_dim), dtype=torch.float32, device=device)
        self.actions = torch.zeros((buffer_size, action_dim), dtype=torch.float32, device=device)
        self.rewards = torch.zeros((buffer_size, 1), dtype=torch.float32, device=device)
        self.next_states = torch.zeros((buffer_size, state_dim), dtype=torch.float32, device=device)
        self.dones = torch.zeros((buffer_size, 1), dtype=torch.float32, device=device)

        # i/o order: state, action, reward, next_state, done

    def from_json(self, json_file: str):
        import json

        if not json_file.endswith('.json'):
            json_file = json_file + '.json'

        json_file = os.path.join("json_datasets", json_file)
        output = dict()

        with open(json_file) as f:
            dataset = json.load(f)

        for k, v in dataset.items():
            v = np.array(v)
            if k != "terminals":
                v = v.astype(np.float32)

            output[k] = v

        self.from_d4rl(output)

    def get_moments(self) -> Tuple[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor, torch.Tensor]]:
        state_mean, state_std = self.states.mean(dim=0), self.states.std(dim=0)
        action_mean, action_std = self.actions.mean(dim=0), self.actions.std(dim=0)

        return (state_mean, state_std), (action_mean, action_std)

    @staticmethod
    def to_tensor(data: np.ndarray, device=None) -> torch.Tensor:
        if device is None:
            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

        return torch.tensor(data, dtype=torch.float32, device=device)

    def sample(self, batch_size: int):
        indexes = np.random.randint(0, self.size, size=batch_size)

        return (
            self.states[indexes],
            self.actions[indexes],
            self.rewards[indexes],
            self.next_states[indexes],
            self.dones[indexes]
        )

    def from_d4rl(self, dataset):
        if self.size:
            print("Warning: loading data into non-empty buffer")
        n_transitions = dataset["observations"].shape[0]

        if n_transitions < self.buffer_size:
            self.states[:n_transitions] = self.to_tensor(dataset["observations"][-n_transitions:], self.device)
            self.actions[:n_transitions] = self.to_tensor(dataset["actions"][-n_transitions:], self.device)
            self.next_states[:n_transitions] = self.to_tensor(dataset["next_observations"][-n_transitions:], self.device)
            self.rewards[:n_transitions] = self.to_tensor(dataset["rewards"][-n_transitions:].reshape(-1, 1), self.device)
            self.dones[:n_transitions] = self.to_tensor(dataset["terminals"][-n_transitions:].reshape(-1, 1), self.device)

        else:
            self.buffer_size = n_transitions

            self.states = self.to_tensor(dataset["observations"][-n_transitions:], self.device)
            self.actions = self.to_tensor(dataset["actions"][-n_transitions:], self.device)
            self.next_states = self.to_tensor(dataset["next_observations"][-n_transitions:], self.device)
            self.rewards = self.to_tensor(dataset["rewards"][-n_transitions:].reshape(-1, 1), self.device)
            self.dones = self.to_tensor(dataset["terminals"][-n_transitions:].reshape(-1, 1), self.device)

        self.size = n_transitions
        self.pointer = n_transitions % self.buffer_size

    def from_d4rl_finetune(self, dataset):
        raise NotImplementedError()

    def normalize_states(self, eps=1e-3):
        mean = self.states.mean(0, keepdim=True)
        std = self.states.std(0, keepdim=True) + eps
        self.states = (self.states - mean) / std
        self.next_states = (self.next_states - mean) / std
        return mean, std

    def clip(self, eps=1e-5):
        self.actions = torch.clip(self.actions, -1 + eps, 1 - eps)

    def add_transition(self,
                       state: torch.Tensor,
                       action: torch.Tensor,
                       reward: torch.Tensor,
                       next_state: torch.Tensor,
                       done: torch.Tensor):
        if not isinstance(state, torch.Tensor):
            state = self.to_tensor(state, self.device)
            action = self.to_tensor(action, self.device)
            reward = self.to_tensor(reward, self.device)
            next_state = self.to_tensor(next_state, self.device)
            done = self.to_tensor(done, self.device)

        self.states[self.pointer] = state
        self.actions[self.pointer] = action
        self.rewards[self.pointer] = reward
        self.next_states[self.pointer] = next_state
        self.dones[self.pointer] = done

        self.pointer = (self.pointer + 1) % self.buffer_size
        self.size = min(self.size + 1, self.buffer_size)

    def add_batch(self,
                  states: List[torch.Tensor],
                  actions: List[torch.Tensor],
                  rewards: List[torch.Tensor],
                  next_states: List[torch.Tensor],
                  dones: List[torch.Tensor]):
        for state, action, reward, next_state, done in zip(states, actions, rewards, next_states, dones):
            self.add_transition(state, action, reward, next_state, done)

    @staticmethod
    def dataset_stats(dataset):
        episode_returns = []
        returns = 0
        episode_length = 0

        for reward, done in zip(dataset["rewards"], dataset["terminals"]):
            returns += reward
            episode_length += 1

            # an episode ends either on a terminal flag or on the 1000-step timeout
            if done or episode_length == 1000:
                episode_returns.append(returns)
                returns = 0
                episode_length = 0

        episode_returns = np.array(episode_returns)
        return episode_returns.mean(), episode_returns.std()
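
A minimal usage sketch, not part of this commit (assumes the gym and d4rl packages are installed and that the module is importable as xql.dataset), showing how the buffer is filled from a D4RL dataset and sampled:

import gym
import d4rl  # noqa: F401 -- registers the D4RL environments with gym

from xql.dataset import ReplayBuffer

env = gym.make("halfcheetah-medium-v2")
# keys: observations, actions, next_observations, rewards, terminals
d4rl_dataset = d4rl.qlearning_dataset(env)

buffer = ReplayBuffer(state_dim=17, action_dim=6, buffer_size=1000000)
buffer.from_d4rl(d4rl_dataset)
state_mean, state_std = buffer.normalize_states()
buffer.clip()

states, actions, rewards, next_states, dones = buffer.sample(batch_size=1024)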
