Tags: PandaReachDense-v2, deep-reinforcement-learning, reinforcement-learning, stable-baselines3

A2C Agent playing PandaReachDense-v2

This is a trained model of an A2C agent playing PandaReachDense-v2, trained with the stable-baselines3 library.

Usage (with Stable-baselines3)
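To use the trained agent, the snippet below is a minimal sketch: it downloads the checkpoint and the VecNormalize statistics from the Hub and evaluates the policy locally. The repo id comes from this card; the file names (a2c-PandaReachDense-v2.zip and vec_normalize.pkl) are assumptions based on the training code further down, so check the repository's file list if they differ.

import gym
import panda_gym

from huggingface_sb3 import load_from_hub
from stable_baselines3 import A2C
from stable_baselines3.common.evaluation import evaluate_policy
from stable_baselines3.common.vec_env import DummyVecEnv, VecNormalize

# Download the checkpoint and the normalization statistics (file names assumed)
checkpoint = load_from_hub("Ryukijano/a2c-PandaReachDense-v2", "a2c-PandaReachDense-v2.zip")
stats_path = load_from_hub("Ryukijano/a2c-PandaReachDense-v2", "vec_normalize.pkl")

# Rebuild the evaluation environment with the saved VecNormalize statistics
eval_env = DummyVecEnv([lambda: gym.make("PandaReachDense-v2")])
eval_env = VecNormalize.load(stats_path, eval_env)
eval_env.training = False     # do not update the statistics at test time
eval_env.norm_reward = False  # use raw rewards for evaluation

# Load the agent and evaluate it
model = A2C.load(checkpoint)
mean_reward, std_reward = evaluate_policy(model, eval_env)
print(f"Mean reward = {mean_reward:.2f} +/- {std_reward:.2f}")

The code below reproduces the training run and the upload to the Hub.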


import pybullet_envs  # kept from the original notebook; not required for PandaReach
import panda_gym
import gym

import os

from huggingface_sb3 import load_from_hub, package_to_hub

from stable_baselines3 import A2C
from stable_baselines3.common.evaluation import evaluate_policy
from stable_baselines3.common.vec_env import DummyVecEnv, VecNormalize
from stable_baselines3.common.env_util import make_vec_env

from huggingface_hub import notebook_login

notebook_login()
!git config --global credential.helper store


import gym

env_id = "PandaReachDense-v2"

# Create the env
env = gym.make(env_id)

# Get the state space and action space
# (the observation space is a Dict, so we inspect the full space rather than a shape)
s_size = env.observation_space
a_size = env.action_space

print("_____OBSERVATION SPACE_____ \n")
print("The State Space is: ", s_size)
print("Sample observation", env.observation_space.sample()) # Get a random observation

print("\n _____ACTION SPACE_____ \n")
print("The Action Space is: ", a_size)
print("Action Space Sample", env.action_space.sample()) # Take a random action

# 1 - 2: Define the environment id and create the vectorized environment
env_id = "PandaReachDense-v2"
env = make_vec_env(env_id, n_envs=100)

# 3: Normalize the observations (rewards are left unnormalized)
env = VecNormalize(env, norm_obs=True, norm_reward=False, clip_obs=10.)

# 4: Create the A2C model with a MultiInputPolicy (needed for Dict observations)
model = A2C(policy="MultiInputPolicy",
            env=env,
            device="cuda",
            verbose=1)
# 5: Train the agent for 1,000,000 timesteps
model.learn(1_000_000)


# 6: Save the model and the VecNormalize statistics
model_name = "a2c-PandaReachDense-v2"
model.save(model_name)
env.save("vec_normalize.pkl")

# 7: Evaluate the agent on a fresh environment using the saved statistics
from stable_baselines3.common.vec_env import DummyVecEnv, VecNormalize

# Load the saved statistics
eval_env = DummyVecEnv([lambda: gym.make("PandaReachDense-v2")])
eval_env = VecNormalize.load("vec_normalize.pkl", eval_env)

# Do not update the normalization statistics at test time
eval_env.training = False
# Reward normalization is not needed at test time
eval_env.norm_reward = False

# Load the agent
model = A2C.load(model_name)

mean_reward, std_reward = evaluate_policy(model, eval_env)

print(f"Mean reward = {mean_reward:.2f} +/- {std_reward:.2f}")

# 8: Package the model and push it to the Hugging Face Hub
package_to_hub(
    model=model,
    model_name=f"a2c-{env_id}",
    model_architecture="A2C",
    env_id=env_id,
    eval_env=eval_env,
    repo_id=f"Ryukijano/a2c-{env_id}", # TODO: Change the username
    commit_message="Initial commit"
)