How to use python api for denoising

Hello,

I am currently using Blender2.81a python API to render the barbershop demo on a server. Although I have set the sampling number to 3000, the output images are very noisy.

I tried to set use_denoising with:

bpy.context.scene.view_layers['View Layer'].cycles.use_denoising = True

However, when I start rendering, it prints:

Fra:1 Mem:5062.57M (0.00M, Peak 5481.01M) | Time:03:00.34 | Remaining:01:31.03 | Mem:2138.12M, Peak:2138.12M | Scene, RenderLayer | Rendered 0/2 Tiles, Sample 28/32, Denoised 0 tiles

and the output images are still very noisy.

So, I was wondering how I could use the denoising API correctly. The following is my Python script for your consideration.

import bpy
import os
import numpy as np

# Remove all the cameras in the scene.
# NOTE: the original code called `bpy.ops.object.delete()` *inside* the
# loop over `bpy.data.objects`, mutating the collection while iterating
# it, and it re-indexed the collection with a stale index `i` after a
# deletion. Select every camera first, then delete the selection once.
bpy.ops.object.select_all(action='DESELECT')
for obj in list(bpy.data.objects):
    if obj.type == 'CAMERA':
        obj.select_set(True)
bpy.ops.object.delete()

# Add a new camera. Operators make the object they create the active
# one, so it is reachable through `bpy.context.object` right after the
# call.
bpy.ops.object.camera_add()
cam = bpy.context.object

# Give the camera a descriptive name (purely cosmetic).
cam.name = 'Camera_360'

# Switch the camera to an omnidirectional (equirectangular panorama) mode.
cam.data.type = 'PANO'
cam.data.cycles.panorama_type = 'EQUIRECTANGULAR'

# Clipping range: only geometry whose distance from the camera lies
# between these two bounds is rendered. The far bound should be tuned
# per .blend file — `1000` may be a large distance in one project and a
# small one in another.
cam.data.clip_start = 0.0
cam.data.clip_end = 1000.0

# Render resolution(s), as (width, height) pairs
# (e.g. (720, 480) or (1920, 1080)).
resolutions = [(1024, 512)]

# Camera position(s) in the scene, as (x, y, z) tuples.
camera_coordinates = [(1.5, 6.0, 1)]

# Field of view of the panoramic camera: a full sphere, with
# latitude spanning [-pi/2, pi/2] and longitude spanning [-pi, pi].
cam.data.cycles.latitude_min = -np.pi / 2
cam.data.cycles.latitude_max = np.pi / 2
cam.data.cycles.longitude_min = -np.pi
cam.data.cycles.longitude_max = np.pi

# Camera orientation as Euler angles (radians).
# The rotation is assumed to follow the right-hand rule.
# TODO: Check the previous statement.
cam.rotation_euler[0] = np.pi / 2  # Rotation about the x axis.
cam.rotation_euler[1] = 0          # Rotation about the y axis.
cam.rotation_euler[2] = 0          # Rotation about the z axis.

# Choose the compute backend: 'CUDA' renders on the GPU(s) listed in
# `gpu_id`; any other value leaves the default (CPU) device in place.
computation_type = 'CUDA'
gpu_id = (0,)  # Indices, within the CUDA device list, of the GPUs to use.

# Set the number of rendering samples.
bpy.context.scene.cycles.samples = 3000

# Select the computing device.
# NOTE: the original code queried `prefs.get_devices()` *before* setting
# `compute_device_type`, and toggled the device `use` flags even when
# CUDA was not selected. Set the backend first, then query and enable.
prefs = bpy.context.preferences.addons['cycles'].preferences
if computation_type == 'CUDA':
    bpy.context.scene.cycles.device = 'GPU'
    prefs.compute_device_type = 'CUDA'
    devices = prefs.get_devices()
    # `get_devices()` returns one device list per backend; index 0 holds
    # the CUDA devices. Enable only the GPUs whose index is in `gpu_id`.
    for i, gpu in enumerate(devices[0]):
        gpu.use = (i in gpu_id)

# Make the newly created camera the active render camera.
bpy.context.scene.camera = cam

# Enable the compositing node tree so compositor nodes can be used.
bpy.context.scene.use_nodes = True

# Render an image for every pair ((width, height), (x, y, z)).

# Settings that depend on neither the resolution nor the camera position
# are configured once, before the loops, instead of on every iteration.
scene = bpy.context.scene
scene.render.resolution_percentage = 100
scene.render.use_compositing = True
# Denoising is a per-view-layer Cycles setting. The layer in this scene
# is named "RenderLayer" (it appears in the render log); adjust the key
# if your .blend uses a different name such as "View Layer".
scene.view_layers["RenderLayer"].cycles.use_denoising = True

tree = scene.node_tree
links = tree.links

for width, height in resolutions:
    for x, y, z in camera_coordinates:

        # Per-image render and camera parameters.
        scene.render.resolution_x = width
        scene.render.resolution_y = height
        scene.camera.location = (x, y, z)

        # Render Layers node: the source of the 'Image' and 'Depth' passes.
        rl = tree.nodes.new(type="CompositorNodeRLayers")

        # Depth map (OpenEXR preserves the raw float depth values).
        depth_output = tree.nodes.new(type="CompositorNodeOutputFile")
        depth_output.format.file_format = 'OPEN_EXR'
        depth_output.base_path = 'outputImages/{w}_{h}'.format(w=width, h=height)
        depth_id = 'test_{x}_{y}_{z}_{w}_{h}_depth_'.format(x=x, y=y, z=z, w=width, h=height)
        depth_socket = depth_output.file_slots.new(depth_id)
        links.new(rl.outputs['Depth'], depth_socket)

        # Color image.
        texture_output = tree.nodes.new(type="CompositorNodeOutputFile")
        texture_output.format.file_format = 'PNG'
        texture_output.base_path = 'outputImages/{w}_{h}'.format(w=width, h=height)
        texture_id = 'test_{x}_{y}_{z}_{w}_{h}_'.format(x=x, y=y, z=z, w=width, h=height)
        texture_socket = texture_output.file_slots.new(texture_id)
        links.new(rl.outputs['Image'], texture_socket)

        # Launch the rendering; the file-output nodes write the images,
        # so `write_still` stays False.
        bpy.ops.render.render(write_still=False)

        # Clean up ALL nodes created this iteration. The original code
        # leaked one Render Layers node per image: `rl` was re-created
        # every pass but never removed.
        tree.nodes.remove(texture_output)
        tree.nodes.remove(depth_output)
        tree.nodes.remove(rl)

Any suggestions would be greatly appreciated.

Best regards,
Zhantao